I have tried two different ways of creating named constraints in loops and both fail:
temp_pred_constraints = Vector{JuMP.ConstraintRef}
for i = 1:128
    @constraint(m, temp_pred_constraints[i], dot(TM[i,:], u - u_cur_v) + T_cur[i] == T_pred[i])
end
and
for i = 1:128
    @constraint(m, eval(Meta.parse("temp_pred_$i")), dot(TM[i,:], u - u_cur_v) + T_cur[i] == T_pred[i])
end
What ended up working is to create anonymous constraints in a comprehension and then attach names to them afterwards with set_name:
temp_pred_constraints = [
    @constraint(m, dot(TM[i,:], u - u_cur_v) + T_cur[i] == T_pred[i])
    for i = 1:128
]
for i = 1:128
    set_name(temp_pred_constraints[i], "temp_pred_constraints[$(i)]")
end
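Once the constraints are named this way, they can also be looked up again by their string name. A minimal self-contained sketch of that pattern (the toy model and the name "cap[...]" are just for illustration, not from my code above):
using JuMP

model = Model()
@variable(model, x[1:3] >= 0)
# Anonymous constraints collected in a comprehension, then named afterwards.
cons = [@constraint(model, x[i] <= i) for i in 1:3]
for i in 1:3
    set_name(cons[i], "cap[$(i)]")
end
# constraint_by_name returns the ConstraintRef, or `nothing` if no constraint has that name.
constraint_by_name(model, "cap[2]")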
P.S., to people with the same issue:
Here's a solved example where I create named constraints in a nested for-loop,
which is better suited to more general cases where a list comprehension gets unwieldy.
It's a unit-commitment setting with a convex function mapping the control u to the power consumption pwr.
pwr_pwl = Array{JuMP.ConstraintRef, 2}(undef, n_ahus, n_psegs)
for i = 1:n_ahus
    max_pwr = power_fns[i](1.0)
    for j = 0:n_psegs-1
        # Create an evenly spaced grid of linear under-approximations between
        # fs_lims[i, :min] and fs_lims[i, :max].
        x0 = ((n_psegs - 1 - j)/(n_psegs-1)) * fs_lims[i, :min] +
             (j/(n_psegs-1)) * fs_lims[i, :max]
        y0 = power_fns[i](x0)
        m = derivative(power_fns[i])(x0)  # slope of the tangent at x0 (not the JuMP model here)
        # println("basepoint for ahu $i, seg $j is $x0")
        pwr_pwl[i, j+1] = @constraint(lin_mdl, pwr[i] >= y0 + m*(u[i] - x0) - (1-u_on[i]) * max_pwr)
        set_name(pwr_pwl[i, j+1], "pwr_pwl_ahu$(i)_p$(j)")
    end
end
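If you want to run the nested-loop pattern end to end, here is a minimal self-contained sketch with made-up data; power_fns, fs_lims, derivative, and the dimensions from my snippet above are replaced with hypothetical stand-ins (a single quadratic power curve with a hand-written derivative, shared flow limits):
using JuMP

n_ahus, n_psegs = 2, 3
power_fn(x) = x^2                # hypothetical convex power curve
power_fn_deriv(x) = 2x           # its derivative, written out by hand
fs_min, fs_max = 0.2, 1.0        # hypothetical flow limits shared by all units

lin_mdl = Model()
@variable(lin_mdl, fs_min <= u[1:n_ahus] <= fs_max)
@variable(lin_mdl, u_on[1:n_ahus], Bin)
@variable(lin_mdl, pwr[1:n_ahus] >= 0)

pwr_pwl = Array{JuMP.ConstraintRef, 2}(undef, n_ahus, n_psegs)
for i = 1:n_ahus
    max_pwr = power_fn(1.0)
    for j = 0:n_psegs-1
        # Tangent (linear under-approximation) at an evenly spaced basepoint x0.
        x0 = ((n_psegs - 1 - j)/(n_psegs - 1)) * fs_min + (j/(n_psegs - 1)) * fs_max
        y0 = power_fn(x0)
        slope = power_fn_deriv(x0)
        pwr_pwl[i, j+1] = @constraint(lin_mdl, pwr[i] >= y0 + slope*(u[i] - x0) - (1 - u_on[i]) * max_pwr)
        set_name(pwr_pwl[i, j+1], "pwr_pwl_ahu$(i)_p$(j)")
    end
end

# The names make the constraints retrievable later, e.g.:
println(constraint_by_name(lin_mdl, "pwr_pwl_ahu1_p0"))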