If you want, you can try this code. It uses the Artificial Bee Colony (ABC) algorithm; I coded it some years ago, and for non-differentiable problems it usually gave me good results.
using StaticArrays
using Random
# Seed and RNG shared by every stochastic routine in this file.
# Marked `const` so the globals are type-stable (non-const globals are
# `Any`-typed and slow down any hot code that reads them).
const SEED = 1234
const rng = Xoshiro(SEED)

# Marker supertype for the extra payload returned by a fitness function
# alongside its value (e.g. a time of flight). Concrete subtypes must
# implement `Base.zero` so a default payload can be constructed.
abstract type ABC_SolInfo end
"""
This code is for Artificial Bee Colony optimization, ABC, it tries to find the minimum of a function
it is presented in the paper:
[1] M. Mernik, SH. Liu, D. Karaboga, M. Crepinsek,
On clarifying misconceptions when comparing variants of the
Artificial Bee Colony Algorithm by offering a new implementation
it is an implementation of ABC_imp1 of the appendix
"""
Base.@kwdef struct ABC_imp1{T,N,SN,info<:ABC_SolInfo}
id::Int # id number
fitness::Function # Fitness function
MFE::Int # Maximum number of fitness evaluations
limit::Int # Maximum number of trial for abandoning a source
nf_eval::MVector{1,Int} # number of evaluations of fitness function
debug::Bool
position_min::MVector{N,T}
position_max::MVector{N,T}
positions::SizedVector{SN,MVector{N,T}} # Positions of bees
fit_pop::MVector{SN,Float64} # fitness of population
add_fit_pop_info::SizedVector{SN,info} # additional info to fit_pop, as time of flight to get the value
best_position::MVector{N,T} = zeros(MVector{N,T}) # best position ever recorded
fitness_best::MVector{1,Float64} = [Inf] # fitness of best posittion ever recorded
add_fitness_best_info::SizedVector{1,info} = [zero(info)] # additional info to fitness_best, as time of flight to get the value
trials::MVector{SN,Int} = zeros(MVector{SN,Int}) # number of trials of each employed bee
history_fit::Array{Tuple{Int64, Float64}, 1} = Tuple{Int,Float64}[]
early_end::Bool = false
val_early_end::Float64 = 0
tol_f::Float64 = 1e-2
end
# Number of food sources (colony size) of the optimizer.
length_pop(::ABC_imp1{T,N,SN}) where {T,N,SN} = SN
# Number of optimized parameters (problem dimension) of the optimizer.
length_par(::ABC_imp1{T,N}) where {T,N} = N
"""
    init_ABC!(info, min_limits, max_limits, fitness; SN=100, MFE=1000, id=1,
              debug=false, limit_=nothing, val_early_end_=nothing, tol_f_=nothing,
              other_sol=nothing)

Create and initialize an `ABC_imp1` optimizer.

# Arguments
- `info`: concrete `ABC_SolInfo` subtype returned by `fitness` as its extra payload.
- `min_limits`, `max_limits`: elementwise bounds, `min_limits[i] ≤ position[i] ≤ max_limits[i]`.
- `fitness`: callable `position -> (value, extra::info)`; the value is minimized.

# Keywords
- `SN`: number of food sources (population size).
- `MFE`: maximum number of fitness evaluations.
- `id`: identifier used in debug output.
- `limit_`: maximum number of trials before abandoning a source; `nothing` means `SN*N`.
- `val_early_end_`: when given, enables early stop once `|best - val_early_end_| < tol_f`.
- `tol_f_`: early-stop tolerance; `nothing` keeps the struct default.
- `other_sol`: optional known solutions seeding the first sources (at most `SN` of them).

Every seeded or random position is evaluated once, so `nf_eval` starts at `SN`.

# Throws
- `ArgumentError` if `other_sol` holds more than `SN` solutions.
"""
function init_ABC!(info::Type{<:ABC_SolInfo}, min_limits::MVector{N,T}, max_limits::MVector{N,T}, fitness::Function; SN::Int=100, MFE::Int=1000, id::Int=1,
        debug::Bool=false, limit_=nothing, val_early_end_=nothing, tol_f_=nothing, other_sol::Union{Nothing, Array{MVector{N,T},1}}=nothing) where {T,N}
    positions = zeros(SizedVector{SN,MVector{N,T}})
    fit_pop = zeros(MVector{SN,Float64})
    add_fit_pop_info = zeros(SizedVector{SN,info})
    nf_eval = zeros(MVector{1,Int})
    # Seed the first sources with user-provided solutions, evaluating each once.
    # The original code indexed `positions[s]` without checking, overflowing when
    # `length(other_sol) > SN`; fail loudly instead.
    nseeded = isnothing(other_sol) ? 0 : length(other_sol)
    nseeded <= SN || throw(ArgumentError("length(other_sol) = $nseeded exceeds SN = $SN"))
    for s in 1:nseeded
        vec_setting!(positions[s], other_sol[s])
        nf_eval[1] += 1
        fit_pop[s], add_fit_pop_info[s] = fitness(positions[s])
    end
    # Fill the remaining sources uniformly at random inside the bounds.
    for s in (nseeded+1):SN
        rand_setting!(positions[s], max_limits, min_limits)
        nf_eval[1] += 1
        fit_pop[s], add_fit_pop_info[s] = fitness(positions[s])
    end
    early_end = !isnothing(val_early_end_)
    val_early_end = early_end ? Float64(val_early_end_) : 0.0
    limit = isnothing(limit_) ? SN*N : limit_
    # Shared keyword set; `tol_f` is added only when the caller supplied it so
    # the struct's documented default applies otherwise.
    base = (id=id, fitness=fitness, MFE=MFE, limit=limit, nf_eval=nf_eval, debug=debug,
        position_min=min_limits, position_max=max_limits, positions=positions,
        fit_pop=fit_pop, add_fit_pop_info=add_fit_pop_info,
        early_end=early_end, val_early_end=val_early_end)
    if isnothing(tol_f_)
        return ABC_imp1{T,N,SN,info}(; base...)
    end
    return ABC_imp1{T,N,SN,info}(; base..., tol_f=tol_f_)
end
"""
    init_array_ABC!(info, M, min_limits, max_limits, fitness; kwargs...)

Build `M` independent `ABC_imp1` optimizers for the same problem and return
them as a `SizedVector{M}`. All keyword arguments have the same meaning as in
`init_ABC!` and are forwarded to it unchanged:
`SN` number of food sources, `MFE` maximum number of fitness evaluations,
`limit_` trials before abandoning a source, `other_sol` optional seed solutions,
and the bounds satisfy `min_limits[i] ≤ position[i] ≤ max_limits[i]`.
"""
function init_array_ABC!(info::Type{<:ABC_SolInfo}, M::Int, min_limits::MVector{N,T}, max_limits::MVector{N,T}, fitness::Function; SN::Int=100, MFE::Int=1000,
        debug::Bool=false, limit_=nothing, val_early_end_=nothing, tol_f_=nothing, other_sol::Union{Nothing, Array{MVector{N,T},1}}=nothing) where {T,N}
    colonies = [init_ABC!(info, min_limits, max_limits, fitness;
                          SN, MFE, debug, limit_, val_early_end_, tol_f_, other_sol)
                for _ in 1:M]
    return SizedVector{M}(colonies)
end
"""
    vec_setting!(position, vec)

Copy `vec` into `position` elementwise. Returns `nothing`.
"""
function vec_setting!(position::MVector{N,T}, vec::MVector{N,T}) where {T,N}
    position .= vec
    return nothing
end
"""
    rand_setting!(position, max_limits, min_limits)

Overwrite `position` with a point drawn uniformly at random inside the box
`[min_limits, max_limits]`, one coordinate at a time (consumes the global
`rng` once per coordinate, in index order). Returns `nothing`.
"""
function rand_setting!(position::MVector{N,T}, max_limits::MVector{N,T}, min_limits::MVector{N,T}) where {T,N}
    for idx in eachindex(position)
        span = max_limits[idx] - min_limits[idx]
        position[idx] = rand(rng)*span + min_limits[idx]
    end
    return nothing
end
"""
    employed_bee!(v_position, position_i, position_k, position_min, position_max; λ=1.0)

Write into `v_position` a candidate derived from `position_i` by perturbing one
randomly chosen coordinate `j` toward/away from the partner `position_k`:
`v_j = x_ij + φ·λ·(x_ij - x_kj)` with `φ` uniform in `[-1, 1]`. If the perturbed
coordinate leaves `[position_min[j], position_max[j]]` it is resampled uniformly
inside those bounds. All other coordinates are copied from `position_i`.
Returns `nothing`.
"""
function employed_bee!( v_position::MVector{N,T}, position_i::MVector{N,T}, position_k::MVector{N,T}, position_min::MVector{N,T}, position_max::MVector{N,T}; λ::Float64=1.0 ) where {T,N}
    j = rand(rng, 1:N) # coordinate to perturb
    v_position .= position_i
    Ď• = -1 + 2*rand(rng) # uniform in [-1, 1)
    v_position[j] = position_i[j] + Ď•*(position_i[j] - position_k[j])*λ
    if !(position_min[j] <= v_position[j] <= position_max[j])
        # Out of bounds: resample this coordinate uniformly within the box.
        v_position[j] = rand(rng)*(position_max[j] - position_min[j]) + position_min[j]
    end
    return nothing
end
"""
    getK(i, SN)

Return a uniformly random partner index in `1:SN` different from `i`.
Draws from the global `rng` over the explicit candidate list `1:SN` minus `i`.
Requires `SN ≥ 2` (otherwise the candidate list is empty and `rand` throws).
"""
function getK(i::Int, SN::Int)
    candidates = [k for k in 1:SN if k != i]
    return rand(rng, candidates)
end
"""
    actualize_probabilities!(prob, fit_pop)

Fill `prob` with the normalized roulette-wheel selection probabilities used in
the onlooker phase. The raw weight is `1/(1 + f)` for positive fitness `f` and
`1 + |f|` otherwise, so smaller (better) fitness gets a larger weight; the
vector is then normalized to sum to one. Returns `nothing`.
"""
function actualize_probabilities!(prob::MVector{SN,Float64}, fit_pop::MVector{SN,Float64}) where {SN}
    for s in eachindex(fit_pop)
        f = fit_pop[s]
        prob[s] = f > 0 ? 1 / (1 + f) : 1 + abs(f)
    end
    prob ./= sum(prob)
    return nothing
end
# Main ABC loop: repeat the employed / onlooker / scout phases until
# `end_condition!` returns true (evaluation budget MFE exhausted, or the
# optional early-end tolerance satisfied). Mutates `abc` in place; the
# best-ever solution is maintained via `end_condition!` in `abc.best_position`.
function evolve_ABC!(abc::ABC_imp1{T,N,SN,info}) where {T,N,SN,info}
    """
    we evolve the population according to ABC algorithm
    """
    v_position = zeros(MVector{N,T}) # scratch buffer for candidate positions
    prob = zeros(MVector{SN,Float64}) # onlooker selection probabilities
    while true
        # Employed bees phase: each source attempts one local move relative to a
        # random partner k; greedy selection keeps the better of old/candidate.
        for s in 1:SN
            k = getK(s, SN)
            employed_bee!( v_position, abc.positions[s], abc.positions[k], abc.position_min, abc.position_max)
            abc.nf_eval[1] += 1
            fit_pop_i, add_fit_pop_info_i = abc.fitness(v_position)
            if fit_pop_i < abc.fit_pop[s]
                # Candidate improved the source: replace it and reset its trials.
                abc.positions[s] = deepcopy(v_position)
                abc.fit_pop[s] = fit_pop_i
                abc.add_fit_pop_info[s] = add_fit_pop_info_i
                abc.trials[s] = 0
            else
                abc.trials[s] += 1 # failed attempt counts toward abandonment
            end
            if end_condition!(abc)
                return nothing
            end
        end
        # Recompute roulette-wheel probabilities from the current fitness values.
        actualize_probabilities!(prob, abc.fit_pop)
        # Onlooker bee phase: SN roulette draws; the source whose cumulative
        # probability first exceeds r gets one more employed-bee style move.
        # NOTE(review): if rounding leaves sum(prob) slightly below r, a draw
        # selects no source at all — presumably rare/benign, TODO confirm.
        for t in 1:SN
            r = rand(rng)
            rs = 0.0 # cumulative probability
            for s in 1:SN
                rs += prob[s]
                if r <= rs
                    k = getK(s, SN)
                    employed_bee!( v_position, abc.positions[s], abc.positions[k], abc.position_min, abc.position_max)
                    abc.nf_eval[1] += 1
                    fit_pop_i, add_fit_pop_info_i = abc.fitness(v_position)
                    if fit_pop_i < abc.fit_pop[s]
                        abc.positions[s] = deepcopy(v_position)
                        abc.fit_pop[s] = fit_pop_i
                        abc.add_fit_pop_info[s] = add_fit_pop_info_i
                        abc.trials[s] = 0
                    else
                        abc.trials[s] += 1
                    end
                    if end_condition!(abc)
                        return nothing
                    end
                    break # one move per draw; go to the next onlooker
                end
            end
        end
        # Scout bee phase: every source that exceeded the trial limit is
        # abandoned and re-seeded uniformly at random inside the bounds.
        for s in 1:SN
            if abc.trials[s] >= abc.limit
                rand_setting!(abc.positions[s], abc.position_max, abc.position_min)
                abc.nf_eval[1] += 1
                fit_pop_i, add_fit_pop_info_i = abc.fitness(abc.positions[s])
                abc.fit_pop[s] = fit_pop_i
                abc.add_fit_pop_info[s] = add_fit_pop_info_i
                abc.trials[s] = 0
                if end_condition!(abc)
                    return nothing
                end
            end
        end
        if end_condition!(abc)
            return nothing
        end
    end
end
# Evolve M independent colonies, one loop iteration per scheduled thread task.
# NOTE(review): every colony draws from the single global `rng`; with more than
# one Julia thread this makes concurrent `rand(rng)` calls on the same Xoshiro
# state — a data race that can corrupt the stream. Consider giving each colony
# its own RNG (e.g. `Xoshiro(SEED + m)`) — TODO confirm and fix upstream.
function evolve_array_ABC!(abc::SizedVector{M,ABC_imp1{T,N,SN,info}}) where {T,N,SN,M,info}
    """
    we evolve the population according to ABC algorithm
    """
    Threads.@threads for m in 1:M
        evolve_ABC!(abc[m])
    end
end
"""
    end_condition!(abc)

Update the best-ever solution from the current population, then decide whether
the optimization should stop. Returns `true` when early stopping is enabled and
`|val_early_end - best| < tol_f`, or when the evaluation budget `MFE` has been
reached; otherwise `false`. Every improvement of the best fitness is appended
to `abc.history_fit` as `(nf_eval, fitness_best)` and printed when `abc.debug`.
"""
function end_condition!(abc::ABC_imp1{T,N,SN}) where {T,N,SN}
    idx = argmin(abc.fit_pop)
    if abc.fit_pop[idx] < abc.fitness_best[1]
        # `.=` already copies elementwise; the original `deepcopy` was a
        # needless extra allocation.
        abc.best_position .= abc.positions[idx]
        abc.add_fitness_best_info[1] = abc.add_fit_pop_info[idx]
        abc.fitness_best[1] = abc.fit_pop[idx]
        if abc.debug
            println(abc.nf_eval[1], " ", abc.fitness_best[1], " ", abc.id)
        end
        push!(abc.history_fit, (abc.nf_eval[1], abc.fitness_best[1]))
    end
    if abc.early_end && abs(abc.val_early_end - abc.fitness_best[1]) < abc.tol_f
        return true
    end
    return abc.nf_eval[1] >= abc.MFE
end
"""
Example of use
"""
struct someinfo <:ABC_SolInfo
x::Float64
end
import Base.zero
Base.zero(::Type{someinfo}) = someinfo(0.0)
"""
    main_ABC()

Example of use: minimize `sum(cos.(par))` over the box `[-10, 10]^2` with a
small colony (`SN = 5`) and a tiny budget (`MFE = 100`). Returns the evolved
`ABC_imp1` instance.
"""
function main_ABC()
    # Fitness returns (value, payload); the payload here is a dummy `someinfo`.
    # Fixed: the constant was the mojibake sequence `Ď€` (mis-encoded `π`).
    function fittt(par::MVector{N,T}) where {N,T}
        return sum(cos.(par)), someinfo(π)
    end
    N = 2
    min_arr = MVector{N,Float64}([ -10.0 for ii in 1:N ])
    max_arr = MVector{N,Float64}([ 10.0 for ii in 1:N ])
    abc = init_ABC!(someinfo, min_arr, max_arr, fittt, debug=false, SN=5, MFE=100)
    evolve_ABC!(abc)
    return abc
end
"""
    main_array_ABC()

Example of use: run `M = 3` independent colonies on the same 2-D problem
(minimize `sum(cos.(par))` over `[-10, 10]^2`) and return the `SizedVector`
of evolved `ABC_imp1` instances.
"""
function main_array_ABC()
    # Fitness returns (value, payload); the payload here is a dummy `someinfo`.
    # Fixed: the constant was the mojibake sequence `Ď€` (mis-encoded `π`).
    function fittt(par::MVector{N,T}) where {N,T}
        return sum(cos.(par)), someinfo(π)
    end
    N = 2
    M = 3
    min_arr = MVector{N,Float64}([ -10.0 for ii in 1:N ])
    max_arr = MVector{N,Float64}([ 10.0 for ii in 1:N ])
    abc = init_array_ABC!(someinfo, M, min_arr, max_arr, fittt, debug=false, SN=5, MFE=100)
    evolve_array_ABC!(abc)
    return abc
end
The idea is that the function you want to optimize can return two results: the value that you want to minimize (here, the standard deviation) and some additional info (here, the time delay).
I think that with some small changes to the code you can put more weight on the “bees” that are near the time delay constraint.