I have a very complicated objective function that involves some distributed calls. I would like to optimize it with Optim using forward-mode automatic differentiation (autodiff = :forward). However, it reports an error. Here is an MWE:
using Distributed
addprocs(Sys.CPU_THREADS-1)
@everywhere function fun_single(x)
    return x^2
end
# I am not sure why this function needs to be @everywhere, but otherwise it reports a different error.
@everywhere function fun_parallel(data)
    result = ones(2)
    np = nprocs()
    i = 1
    nextidx() = (idx = i; i += 1; idx)
    @sync begin
        for p = 1:np
            if p != myid() || np == 1
                @async begin
                    while true
                        idx = nextidx()
                        if idx > 2
                            break
                        end
                        result[idx] = remotecall_fetch(fun_single, p, data[idx])
                    end
                end
            end
        end
    end
    return sum(result)
end
using Optim
res = Optim.optimize(fun_parallel, ones(2), Newton(), Optim.Options(show_trace = true); autodiff = :forward)
The error I got is as follows:
TaskFailedException:
MethodError: no method matching Float64(::ForwardDiff.Dual{ForwardDiff.Tag{typeof(fun_parallel),Float64},Float64,2})
Closest candidates are:
Float64(::Real, !Matched::RoundingMode) where T<:AbstractFloat at rounding.jl:200
Float64(::T) where T<:Number at boot.jl:716
Float64(!Matched::Irrational{:γ}) at irrationals.jl:189
...
Stacktrace:
[1] convert(::Type{Float64}, ::ForwardDiff.Dual{ForwardDiff.Tag{typeof(fun_parallel),Float64},Float64,2}) at ./number.jl:7
[2] setindex!(::Array{Float64,1}, ::ForwardDiff.Dual{ForwardDiff.Tag{typeof(fun_parallel),Float64},Float64,2}, ::Int64) at ./array.jl:847
[3] macro expansion at /Users/Desktop/Notes.jl:2695 [inlined]
[4] (::var"#7#9"{Array{ForwardDiff.Dual{ForwardDiff.Tag{typeof(fun_parallel),Float64},Float64,2},1},Array{Float64,1},var"#nextidx#8",Int64})() at ./task.jl:356
...and 1 more exception(s).
sync_end(::Channel{Any}) at task.jl:314
macro expansion at task.jl:333 [inlined]
fun_parallel(::Array{ForwardDiff.Dual{ForwardDiff.Tag{typeof(fun_parallel),Float64},Float64,2},1}) at Notes.jl:2686
vector_mode_dual_eval at apiutils.jl:37 [inlined]
vector_mode_gradient!(::DiffResults.MutableDiffResult{1,Float64,Tuple{Array{Float64,1}}}, ::typeof(fun_parallel), ::Array{Float64,1}, ::ForwardDiff.GradientConfig{ForwardDiff.Tag{typeof(fun_parallel),Float64},Float64,2,Array{ForwardDiff.Dual{ForwardDiff.Tag{typeof(fun_parallel),Float64},Float64,2},1}}) at gradient.jl:113
gradient! at gradient.jl:37 [inlined]
gradient! at gradient.jl:35 [inlined]
(::NLSolversBase.var"#42#48"{typeof(fun_parallel),ForwardDiff.GradientConfig{ForwardDiff.Tag{typeof(fun_parallel),Float64},Float64,2,Array{ForwardDiff.Dual{ForwardDiff.Tag{typeof(fun_parallel),Float64},Float64,2},1}}})(::Array{Float64,1}, ::Array{Float64,1}) at twicedifferentiable.jl:130
value_gradient!!(::TwiceDifferentiable{Float64,Array{Float64,1},Array{Float64,2},Array{Float64,1}}, ::Array{Float64,1}) at interface.jl:82
initial_state(::Newton{LineSearches.InitialStatic{Float64},LineSearches.HagerZhang{Float64,Base.RefValue{Bool}}}, ::Optim.Options{Float64,Nothing}, ::TwiceDifferentiable{Float64,Array{Float64,1},Array{Float64,2},Array{Float64,1}}, ::Array{Float64,1}) at newton.jl:45
optimize at optimize.jl:33 [inlined]
#optimize#87 at interface.jl:142 [inlined]
(::Optim.var"#optimize##kw")(::NamedTuple{(:autodiff,),Tuple{Symbol}}, ::typeof(optimize), ::Function, ::Array{Float64,1}, ::Newton{LineSearches.InitialStatic{Float64},LineSearches.HagerZhang{Float64,Base.RefValue{Bool}}}, ::Optim.Options{Float64,Nothing}) at interface.jl:141
top-level scope at Notes.jl:2705
I wonder how I should revise my code.
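For reference, my reading of the stack trace is that the failure happens at result[idx] = remotecall_fetch(fun_single, p, data[idx]): with autodiff = :forward, Optim evaluates fun_parallel with ForwardDiff.Dual numbers, but result = ones(2) is a Vector{Float64}, so setindex! cannot store a Dual in it. Below is a minimal sketch of the kind of revision I have in mind, assuming (a) the buffer's element type should follow the input and (b) the workers need ForwardDiff loaded (it may have to be added to the environment first) so the Dual values can be sent to them. pmap stands in for my scheduling loop only to keep the sketch short, and I have not verified any of this against the real objective.

using Distributed
addprocs(Sys.CPU_THREADS - 1)

# Assumption: the workers must have ForwardDiff loaded so they can
# deserialize the Dual numbers that Optim passes in with autodiff = :forward.
@everywhere using ForwardDiff

@everywhere function fun_single(x)
    return x^2              # generic in x, so it also accepts Dual numbers
end

function fun_parallel(data)
    # Assumption: let the element type follow the input instead of hard-coding
    # Float64, so the buffer can hold Dual numbers during the gradient/Hessian pass.
    result = zeros(eltype(data), 2)
    # pmap is used here only to keep the sketch short; the original @sync/@async
    # scheduling loop should behave the same once result has the right element type.
    result .= pmap(fun_single, data)
    return sum(result)
end

using Optim
res = Optim.optimize(fun_parallel, ones(2), Newton(),
                     Optim.Options(show_trace = true); autodiff = :forward)

Is this the right direction, or is there a better way to combine Distributed with ForwardDiff here? Thank you very much.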