@longemen3000 Hi Andres, and thank you very much for everything.
I did as you suggested for stage2_rbf,
but another bug came up:
ERROR: LoadError: UndefVarError: X not defined
Stacktrace:
[1] fg!(::Float64, ::LinearAlgebra.Transpose{Float64,Array{Float64,1}}, ::LinearAlgebra.Transpose{Float64,Array{Float64,1}}) at /home/xxxx_xx/.julia/dev/PreImageProblems/src/pre_image_problem.jl:195
[2] (::getfield(NLSolversBase, Symbol("##61#62")){NLSolversBase.InplaceObjective{Nothing,typeof(PreImageProblems.fg!),Nothing,Nothing,Nothing},Float64})(::LinearAlgebra.Transpose{Float64,Array{Float64,1}}, ::LinearAlgebra.Transpose{Float64,Array{Float64,1}}) at /home/xxxx_xx/.julia/packages/NLSolversBase/NsXIC/src/objective_types/incomplete.jl:45
[3] value_gradient!!(::OnceDifferentiable{Float64,LinearAlgebra.Transpose{Float64,Array{Float64,1}},LinearAlgebra.Transpose{Float64,Array{Float64,1}}}, ::LinearAlgebra.Transpose{Float64,Array{Float64,1}}) at /home/xxxx_xx/.julia/packages/NLSolversBase/NsXIC/src/interface.jl:82
[4] initial_state(::BFGS{LineSearches.InitialStatic{Float64},LineSearches.HagerZhang{Float64,Base.RefValue{Bool}},Nothing,Nothing,Flat}, ::Optim.Options{Float64,Nothing}, ::OnceDifferentiable{Float64,LinearAlgebra.Transpose{Float64,Array{Float64,1}},LinearAlgebra.Transpose{Float64,Array{Float64,1}}}, ::LinearAlgebra.Transpose{Float64,Array{Float64,1}}) at /home/xxxx_xx/.julia/packages/Optim/EhyUl/src/multivariate/solvers/first_order/bfgs.jl:66
[5] optimize(::OnceDifferentiable{Float64,LinearAlgebra.Transpose{Float64,Array{Float64,1}},LinearAlgebra.Transpose{Float64,Array{Float64,1}}}, ::LinearAlgebra.Transpose{Float64,Array{Float64,1}}, ::BFGS{LineSearches.InitialStatic{Float64},LineSearches.HagerZhang{Float64,Base.RefValue{Bool}},Nothing,Nothing,Flat}, ::Optim.Options{Float64,Nothing}) at /home/xxxx_xx/.julia/packages/Optim/EhyUl/src/multivariate/optimize/optimize.jl:33 (repeats 2 times)
[6] myKPCA(::Array{Float64,2}, ::Int64, ::Array{Int64,1}, ::UnitRange{Int64}) at /home/ujku_ku/.julia/dev/PreImageProblems/src/pre_image_problem.jl:103
[7] top-level scope at none:0
in expression starting at /home/xxxx_xx/.julia/dev/PreImageProblems/src/polyDataDenoisExp.jl:239
Line 195 of pre_image_problem.jl is
f, gradf = stage2_rbf(_x, X, K, cpK, alphaA, rho; nargout=2)
inside the function:
function fg!(F, G, _x)
    f, gradf = stage2_rbf(_x, X, K, cpK, alphaA, rho; nargout=2) # uses the already defined variables
    ...
end
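For reference, I modelled fg! on the fg! pattern from the Optim.jl documentation: Optim passes G = nothing when it only needs the value and F = nothing when it only needs the gradient. Here it is on a toy sum-of-squares objective so it is self-contained (fg_demo! is just a demo, not my real objective):

using Optim

function fg_demo!(F, G, x)
    if G !== nothing
        G .= 2 .* x          # gradient of sum(abs2, x)
    end
    if F !== nothing
        return sum(abs2, x)  # objective value
    end
end

res = optimize(Optim.only_fg!(fg_demo!), randn(3), BFGS())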
Maybe I should explain how I included your code in mine:
using LinearAlgebra, Statistics, Optim # for eigen, mean/var, and the optimizer

function myKPCA(X::Array{Float64,2},
                rho_m::Int64,
                nL_vector::Array{Int64,1},
                whidx::UnitRange{Int64})
    # myKPCA: KPCA with Gaussian RBF kernel
    # 2 methods: (1) Scholkopf, and (2) contour gradient
    N, n = size(X) # N: sample size; n: dimension
    oneOverN = 1 / N
    ones1n = ones(1, n)
    Ntest = length(whidx) # whidx = 1:size(X, 1)
    # inter-distances
    norm2 = zeros(N, N)
    for i in 1:N
        for j in (i+1):N
            norm2[i, j] = transpose(X[i, :] - X[j, :]) * (X[i, :] - X[j, :])
            norm2[j, i] = norm2[i, j]
        end
    end
    # rho = mean_component_variance * rho_m
    mean_comp_var = mean(var(X, dims=1))
    rho = mean_comp_var * size(X, 2) * rho_m # same as rho = sum(var(X, dims=1)) * rho_m
    # compute the kernel matrix
    K = zeros(N, N)
    for i in 1:N
        for j in i:N
            K[i, j] = exp(-norm2[i, j] / rho)
            K[j, i] = K[i, j]
        end
    end
    # centering the kernel matrix
    unit = ones(N, 1)
    unit2 = ones(N, N) # buffer
    Kone = K * unit
    oneKone = transpose(unit) * Kone # buffer
    oneKoneUnit2 = oneOverN^2 .* oneKone .* unit
    K_tilde = K - unit2 * K / N - K * unit2 / N + unit * oneKone * transpose(unit) / N^2
    K_tilde = min.(K_tilde, transpose(K_tilde)) # enforce symmetry elementwise
    # find eigenvalues and eigenvectors of the centered kernel matrix
    F = LinearAlgebra.eigen(K_tilde)
    evecs = F.vectors
    evals = real(F.values)
    # denoising by 2 different methods
    # loop over different choices of the number of retained eigenvectors
    for vv in 1:length(nL_vector)
        @show vv
        nL = nL_vector[vv] # 'nL_vector' holds the number of principal vectors used in each run
        large_idx = N - nL .+ (1:nL) # indices of the last nL (largest) eigenvalues in 'evals'
        alphaA = evecs[:, large_idx] # the principal eigenvectors
        levals = evals[large_idx] # the corresponding eigenvalues
        alphaA = alphaA * Diagonal(levals .^ (-0.5)) # normalize the principal eigenvectors (e-vectors in feature space)
        Xp1 = zeros(Ntest, n)
        for ii in 1:Ntest
            i = whidx[ii]
            # x = X[i, :]
            x = transpose(X[i, :])
            Kx = zeros(N, 1)
            for j in 1:N
                Kx[j, 1] = exp(-(x - transpose(X[j, :])) * transpose(x - transpose(X[j, :])) / rho) # Kx has a single column
            end
            Kx_tilde = Kx - 1/N .* K * unit - 1/N .* unit * transpose(unit) * Kx + 1/(N^2) .* (transpose(unit) * K * unit) .* unit
            cpK = transpose(Kx_tilde) * alphaA
            # optimization
            # xp1 = optimize(x -> stage2_rbf(x, X, K, cpK, alphaA, rho; nargout=2), x, BFGS())
            f_stage2_rbf(x) = stage2_rbf(x, X, K, cpK, alphaA, rho; nargout=2)
            objective = OnceDifferentiable(Optim.only_fg!(fg!), x)
            xp1 = optimize(objective, x, BFGS())
            # xp1 = optimize(f_stage2_rbf, x, BFGS())
            Xp1[ii, :] = vec(Optim.minimizer(xp1)) # extract the minimizer from the results object
        end
        return Xp1 # NOTE: this returns during the first pass of the vv loop
    end # for vv
end # myKPCA
function fg!(F::Any, G::Any, _x::Any) # also tried F::Float64, G::Array{Float64,1}, _x::Array{Float64,1}
    f, gradf = stage2_rbf(_x, X, K, cpK, alphaA, rho; nargout=2)
    if G !== nothing
        for i in 1:length(_x)
            G[i] = gradf[i, 1]
        end
    end
    if F !== nothing
        return f[1, 1]
    end
end
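As an aside on the centering step in myKPCA above: a quick sanity check I wrote (mine, not part of either package) is that the formula K - 1K/N - K1/N + 1(1'K1)1'/N^2 should match H*K*H with the centering matrix H = I - ones(N,N)/N:

using LinearAlgebra

N = 5
A = randn(N, N)
K = A * A'          # a symmetric PSD stand-in for the kernel matrix
unit = ones(N, 1)
unit2 = ones(N, N)
K_tilde = K - unit2 * K / N - K * unit2 / N + unit * (transpose(unit) * K * unit) * transpose(unit) / N^2
H = I - unit2 / N   # centering matrix
@assert isapprox(K_tilde, H * K * H; atol=1e-10)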
Do you think it would be better to use the helper
f_stage2_rbf(x) = stage2_rbf(x, X, K, cpK, alphaA, rho; nargout=2)
instead of calling
stage2_rbf(_x, X, K, cpK, alphaA, rho; nargout=2)
directly inside fg!, like this:
function fg!(F::Any, G::Any, _x::Any)
    f, gradf = f_stage2_rbf(_x)
    if G !== nothing
        for i in 1:length(_x)
            G[i] = gradf[i, 1]
        end
    end
    if F !== nothing
        return f[1, 1]
    end
end
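To check my understanding of the scoping (stage2_demo below is a made-up stand-in for stage2_rbf, not the real thing): I think an fg! defined at top level cannot see myKPCA's locals X, K, cpK, alphaA, rho, whereas an fg! defined inside the function is a closure that captures them, like this:

using Optim

# toy (f, gradf) pair standing in for stage2_rbf
stage2_demo(x, c) = (sum(abs2, x .- c), 2 .* (x .- c))

function outer_demo(c)
    # fg! defined INSIDE the function closes over the local `c`,
    # the same way my fg! would need to close over X, K, cpK, alphaA, rho
    function fg!(F, G, x)
        f, gradf = stage2_demo(x, c)
        if G !== nothing
            G .= gradf
        end
        if F !== nothing
            return f
        end
    end
    optimize(Optim.only_fg!(fg!), zeros(length(c)), BFGS())
end

res = outer_demo([1.0, 2.0, 3.0]) # converges to c

Is that what is going wrong here?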
Once more, thank you very much for your patience and support.
Cheers.
Ergnoor