I am struggling to speed up the computation of solutions for a finite horizon dynamic programming problem. I am using interpolation so the value function can be evaluated at asset levels between grid points instead of being recomputed off the grid. Right now it takes my computer around 7 minutes to solve this 5-period model.

Please have a look and let me know what I can improve. Any suggestions you may have will help me!

Thanks!

```
# VFI finite horizon
using Optim
using Distributions
using Random
using Interpolations
# transformation functions
# open interval (a, b)
"""
    logit(x; a = 0, b = 1)

Logistic (sigmoid) transform mapping all of ℝ onto the open interval `(a, b)`.

Uses the numerically stable form `(b - a)/(1 + exp(-x)) + a`. The original
`exp(x)/(1 + exp(x))` overflows for large positive `x`, producing
`Inf/Inf = NaN`; the stable form returns `b` at `x → +Inf` and `a` at
`x → -Inf`, which is what a bounded optimizer needs.
"""
function logit(x; a = 0, b = 1)
    return (b - a) / (1 + exp(-x)) + a
end
"""
    utility(c, L)

Period utility `log(c) + log(1 - L)` over consumption `c` and labor `L`.

Returns `-Inf` for any infeasible point — `c <= 0`, `L <= 0`, or `L >= 1` —
so the optimizer sees a hard penalty instead of an exception. The original
guarded only `c <= 0 || L <= 0`; `log(1 - L)` throws a `DomainError` for
`L > 1`. At `L == 1` the value is unchanged (`log(0) == -Inf`).
"""
function utility(c::R, L::R) where R <: Real
    if c <= 0 || L <= 0 || L >= 1
        return -Inf
    else
        return log(c) + log(1 - L)
    end
end
# Model primitives and pre-allocated solution arrays. All globals are `const`
# so the top-level loops below are not slowed down by untyped globals.
const T = 5 # terminal period
const Î˛ = 0.95 # discount factor (β; identifier is mojibake of the UTF-8 name)
const r = 0.05 # interest rate
const dist = LogNormal(0, 1) # distribution of the wage shock Îľ (ε)
const n = 1 # number of points of support of Îľ
Random.seed!(1234)
const w = Vector{Float64}(undef, T) # exogenous wages, one per period
# deterministic hump-shaped wage profile over the life cycle
w .= (900 .+ 20.0 .* (1:T) .- 0.5 .* (1:T).^2)
const grid_A = -1000:10:8_000 # asset grid (borrowing allowed down to -1000)
# Discretized shock support via quantiles of `dist`.
# NOTE(review): with n = 1 the range 0:1/n:prevfloat(1.0) contains only 0, so
# this evaluates the quantile at probability 0 — midpoints such as
# quantile.(dist, ((1:n) .- 0.5) ./ n) may be what was intended; confirm.
const Îľ = quantile.(dist, 0:1/n:prevfloat(1.0))
# Solution arrays indexed (shock i, asset gridpoint s, period t):
const Vdict = Array{Float64}(undef, (n, length(grid_A), T))   # value function
const Cdict = Array{Float64}(undef, (n, length(grid_A), T))   # consumption policy
const Ldict = Array{Float64}(undef, (n, length(grid_A), T))   # labor-supply policy
const A1dict = Array{Float64}(undef, (n, length(grid_A), T))  # next-period assets policy
const convdict = Array{Bool}(undef, (n, length(grid_A), T))   # optimizer convergence flags
# solve last period
# Solve the terminal period: no continuation value, all assets are consumed
# (A' = 0), so the problem reduces to a 1-D labor choice on [0, 1].
for s in eachindex(grid_A)
    for i in 1:n
        # Maximize utility over labor supply x ∈ [0, 1].
        # FIX: the original objective used `w[T]*x` (no shock) while Cdict
        # below uses `w[T] + Îľ[i]` — the shock must enter the maximized
        # objective too, otherwise policy and stored consumption disagree.
        opt = optimize( x -> -utility((w[T] + Îľ[i])*x + grid_A[s]*(1+r), x), 0.0, 1.0 )
        Ldict[i, s, T] = Optim.minimizer(opt)
        Cdict[i, s, T] = (w[T] + Îľ[i])*Ldict[i, s, T] + grid_A[s]*(1+r)
        A1dict[i, s, T] = 0.0 # terminal period: nothing carried forward
        Vdict[i, s, T] = -Optim.minimum(opt)
    end
end
# Expected continuation value at period t, averaged over the Îľ support and
# linearly interpolated on the asset grid (linear extrapolation off-grid).
# `view` + `sum(...; dims = 1)` avoids allocating a slice copy per shock
# point, unlike the original `sum(Vdict[i, :, t] for i in 1:n)`.
interp_func(t) = LinearInterpolation(grid_A, vec(sum(view(Vdict, :, :, t), dims = 1)) ./ n,
                                     extrapolation_bc = Line())
# solve rest of periods
@time for t in T-1:-1:1
    # PERFORMANCE FIX: build the continuation-value interpolant ONCE per
    # period. The original called `interp_func(t+1)(x[1])` inside the
    # objective, reconstructing the interpolant (including the average over
    # shocks) on every single objective/gradient evaluation — this dominates
    # the runtime of the whole solve.
    EV = interp_func(t + 1)
    for s in eachindex(grid_A)
        for i in 1:n
            # Warm-start assets at next period's policy; 0.0 maps to
            # logit(0) = 0.5 for labor.
            initial_x = [A1dict[i, s, t+1], 0.0]
            # x[1] is assets to carry forward, x[2] is labor supply on the
            # unbounded logit scale (so the optimizer is unconstrained).
            opt = optimize(x -> begin
                               lab = logit(x[2]) # map to (0, 1); computed once
                               c = lab * (w[t] + Îľ[i]) + grid_A[s] * (1 + r) - x[1]
                               -(utility(c, lab) + Î˛ * EV(x[1]))
                           end,
                           initial_x, method = NewtonTrustRegion(),
                           autodiff = :forward, iterations = 100_000)
            Vdict[i, s, t] = -Optim.minimum(opt)
            xstar = Optim.minimizer(opt)
            Ldict[i, s, t] = logit(xstar[2])
            A1dict[i, s, t] = xstar[1]
            Cdict[i, s, t] = Ldict[i, s, t] * (w[t] + Îľ[i]) + grid_A[s] * (1 + r) - A1dict[i, s, t]
            convdict[i, s, t] = Optim.converged(opt)
        end
    end
    println("period ", t, " finished")
end
```