Is there an equivalent to Optim.jl's only_fg! in Optimization.jl?

Optim.jl provides an only_fg! wrapper that lets you supply a single fg! function computing the objective f and its gradient g together, so any work shared by the two is done once instead of being repeated in separate f and g calls.
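For reference, the Optim.jl pattern I mean looks roughly like this (a minimal sketch based on my reading of the Optim.jl docs; the shared computation here is trivial, but in my real problem it is expensive):

using Optim

# Objective and gradient in one function; Optim passes `nothing` for
# whichever of F / G it does not need on a given call.
function fg!(F, G, x)
    r = 2.0 .* x          # stand-in for the expensive shared computation
    if G !== nothing
        G .= r            # write the gradient in place
    end
    if F !== nothing
        return sum(abs2, x)
    end
end

x0 = [1.0, 2.0]
result = optimize(Optim.only_fg!(fg!), x0, LBFGS())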

Does Optimization.jl have something like this?

Hi @anthony-meza, welcome to the forum :smile:

I haven’t really used Optimization.jl myself, so I’m not much help there, but as far as I know there is no direct equivalent.

You can achieve something similar with memoization, i.e. caching the result of the most recent fg! evaluation so that f and grad can share it:

julia> function cache_f_and_g!(f_and_g!, dimension)
           # cache of the most recent evaluation, shared by the returned f and grad
           last_u = nothing
           last_f, last_grad = 0.0, zeros(Float64, dimension)
           # re-run f_and_g! only when u differs from the cached input
           function update!(u, p)
               if u != last_u
                   println("Updating cache")
                   last_f = f_and_g!(last_grad, u, p)
                   last_u = copy(u)  # copy, in case the solver mutates u in place
               end
           end
           function f(u, p)
               update!(u, p)
               return last_f
           end
           function grad(G, u, p)
               update!(u, p)
               G .= last_grad
           end
           return f, grad
       end
cache_f_and_g! (generic function with 1 method)

julia> function f_and_g!(res, u, p)
           res .= 2.0 .* u
           return u[1]^2 + u[2]^2
       end
f_and_g! (generic function with 1 method)

julia> f, grad = cache_f_and_g!(f_and_g!, 2)
(var"#f#cache_f_and_g!##7"{var"#update!#cache_f_and_g!##6"{typeof(f_and_g!), Vector{Float64}}}(var"#update!#cache_f_and_g!##6"{typeof(f_and_g!), Vector{Float64}}(f_and_g!, [0.0, 0.0], Core.Box(0.0), Core.Box(nothing)), Core.Box(0.0)), var"#grad#cache_f_and_g!##8"{var"#update!#cache_f_and_g!##6"{typeof(f_and_g!), Vector{Float64}}, Vector{Float64}}(var"#update!#cache_f_and_g!##6"{typeof(f_and_g!), Vector{Float64}}(f_and_g!, [0.0, 0.0], Core.Box(0.0), Core.Box(nothing)), [0.0, 0.0]))

julia> f([1.0, 2.0], nothing)
Updating cache
5.0

julia> f([1.0, 2.0], nothing)
5.0

julia> f([1.0, 2.0], nothing)
5.0

julia> G = zeros(2)
2-element Vector{Float64}:
 0.0
 0.0

julia> grad(G, [1.0, 2.0], nothing)
2-element Vector{Float64}:
 2.0
 4.0

julia> grad(G, [1.0, 2.3], nothing)
Updating cache
2-element Vector{Float64}:
 2.0
 4.6
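
If the OptimizationFunction keyword interface works the way I read the SciML docs (a user-supplied in-place gradient grad(G, u, p) passed alongside the objective), you could then wire the cached pair in like this. This is a hedged sketch I haven't run, so double-check the keyword name and solver import:

using Optimization, OptimizationOptimJL

f, grad = cache_f_and_g!(f_and_g!, 2)

# Assumption: OptimizationFunction accepts an explicit `grad` keyword with the
# in-place grad(G, u, p) signature, so no AD backend is needed here.
optf = OptimizationFunction(f; grad = grad)
prob = OptimizationProblem(optf, [1.0, 2.0])

sol = solve(prob, LBFGS())  # assuming LBFGS is re-exported by OptimizationOptimJL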