Hi!
I want to create this structure for generalized linear models (GLM), but I get the following error message:
ERROR: type GLM has no field train!
Stacktrace:
[1] getproperty
@ ./Base.jl:49 [inlined]
[2] main()
@ Main ~/Documents/projektek/machine_learning/test.jl:70
[3] top-level scope
@ ~/Documents/projektek/machine_learning/test.jl:80
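From the stack trace I guess the dot call itself is the problem: as far as I understand, glm.train!() is first lowered to getproperty(glm, :train!), so Julia looks for a field named train! on the struct rather than a method. This is only my assumption of what the trace means, so the snippet below is just how I read the error, not something I know for sure:

# My reading of the error (assumption): the dot syntax asks for a field first,
# so the call roughly evaluates this before anything else, and this is what fails.
f = getproperty(glm, Symbol("train!"))   # ERROR: type GLM has no field train!
f()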
The code is:
using LinearAlgebra, Random, Plots

# Generate synthetic data
function generate_data(n_samples, n_features, noise=0.1)
    Random.seed!(42)                         # For reproducibility
    X = rand(n_samples, n_features)          # Features matrix
    X = hcat(ones(n_samples), X)             # Add bias term
    true_weights = randn(n_features + 1)     # True weights (including bias)
    y = X * true_weights + noise * randn(n_samples)  # Outputs with noise
    return X, y, true_weights
end

# Generalized Linear Model
mutable struct GLM
    learning_input::Matrix{Float64}
    learning_output::Vector{Float64}
    theta::Vector{Float64}
    learning_rate::Float64
    max_iter::Int64
    convergence::Float64
    cost_history::Vector{Float64}

    # Constructor
    function GLM(learning_input::Matrix{Float64}, learning_output::Vector{Float64};
                 learning_rate::Float64 = 0.1, max_iter::Int64 = 100, convergence::Float64 = 1e-6)
        @assert size(learning_input, 1) == length(learning_output) "Inputs and outputs sizes do not match"
        n_features = size(learning_input, 2)
        new(learning_input, learning_output, zeros(n_features), learning_rate, max_iter, convergence, Float64[])
    end

    # Hypothesis function
    function hypothesis(self::GLM)  # Corrected method definition
        return self.learning_input * self.theta
    end

    # Batch gradient descent
    function train!(self::GLM)  # Use `train!` to indicate in-place modification
        epoch = size(self.learning_input, 1)
        for i in 1:self.max_iter
            prediction = self.hypothesis()
            error = prediction - self.learning_output
            gradient = (self.learning_input' * error) / epoch
            self.theta -= self.learning_rate * gradient
            mean_squared_error = sqrt(sum(error.^2) / (2 * (epoch-1)))
            push!(self.cost_history, mean_squared_error)
            # Convergence checks
            if mean_squared_error < self.convergence
                break
            elseif i > 1 && abs(self.cost_history[end] - self.cost_history[end-1]) < self.convergence
                break
            end
        end
    end
end

# Main function
function main()
    # Parameters
    n_samples = 100
    n_features = 3
    noise = 0.2

    # Generate synthetic data
    X, y, true_weights = generate_data(n_samples, n_features, noise)

    # Initialize and train GLM
    glm = GLM(X, y)
    glm.train!()

    # Output results
    println("True Weights: $true_weights")
    println("Estimated Weights: $(glm.theta)")

    # Plot cost convergence
    plot(glm.cost_history, title="Cost Convergence", xlabel="Iterations", ylabel="Cost", legend=false)
end

main()
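
In case it matters: my guess (only an assumption, I have not rewritten anything yet) is that the more usual Julia pattern would be a plain function defined outside the struct that takes the instance as an argument. The name fit! below is just something I made up for illustration, and the sketch only repeats the gradient step without the cost history:

# Hypothetical sketch, not my actual code: the method lives outside the struct
# and mutates the passed-in model, so it is called as fit!(glm), not glm.fit!().
function fit!(model::GLM)
    n = size(model.learning_input, 1)
    for _ in 1:model.max_iter
        prediction = model.learning_input * model.theta   # same as hypothesis
        err = prediction - model.learning_output
        model.theta -= model.learning_rate * (model.learning_input' * err) / n
    end
    return model
end

fit!(glm)   # called as a normal function instead of glm.train!()

But I would really like to keep the methods inside the struct if that is possible.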
Can you fix the code so that it keeps its structure but runs?
Thank you