Error: calling a struct's own function

Hi!

I want to create this structure for generalized linear models (GLM), but I get the following error message:
ERROR: type GLM has no field train!
Stacktrace:
[1] getproperty
@ ./Base.jl:49 [inlined]
[2] main()
@ Main ~/Documents/projektek/machine_learning/test.jl:70
[3] top-level scope
@ ~/Documents/projektek/machine_learning/test.jl:80

The code is:

using LinearAlgebra, Random, Plots

# Generate synthetic data
function generate_data(n_samples, n_features, noise=0.1)
    Random.seed!(42) # For reproducibility
    X = rand(n_samples, n_features) # Features matrix
    X = hcat(ones(n_samples), X) # Add bias term
    true_weights = randn(n_features + 1) # True weights (including bias)
    y = X * true_weights + noise * randn(n_samples) # Outputs with noise
    return X, y, true_weights
end

# Generalized Linear Model
mutable struct GLM
    learning_input::Matrix{Float64}
    learning_output::Vector{Float64}
    theta::Vector{Float64}
    learning_rate::Float64
    max_iter::Int64
    convergence::Float64
    cost_history::Vector{Float64}

    # Constructor
    function GLM(learning_input::Matrix{Float64}, learning_output::Vector{Float64};
                 learning_rate::Float64 = 0.1, max_iter::Int64 = 100, convergence::Float64 = 1e-6)
        @assert size(learning_input, 1) == length(learning_output) "Inputs and outputs sizes do not match"
        n_features = size(learning_input, 2)
        new(learning_input, learning_output, zeros(n_features), learning_rate, max_iter, convergence, Float64[])
    end

    # Hypothesis function
    function hypothesis(self::GLM)  # Corrected method definition
        return self.learning_input * self.theta
    end

    # Batch gradient descent
    function train!(self::GLM)  # Use `train!` to indicate in-place modification
        epoch = size(self.learning_input, 1)

        for i in 1:self.max_iter
            prediction = self.hypothesis()
            error = prediction - self.learning_output
            gradient = (self.learning_input' * error) / epoch
            self.theta -= self.learning_rate * gradient
            mean_squared_error = sqrt(sum(error.^2) / (2 * (epoch-1)))
            push!(self.cost_history, mean_squared_error)

            # Convergence checks
            if mean_squared_error < self.convergence
                break
            elseif i > 1 && abs(self.cost_history[end] - self.cost_history[end-1]) < self.convergence
                break
            end
        end
    end
end

# Main function
function main()
    # Parameters
    n_samples = 100
    n_features = 3
    noise = 0.2

    # Generate synthetic data
    X, y, true_weights = generate_data(n_samples, n_features, noise)

    # Initialize and train GLM
    glm = GLM(X, y)
    glm.train!()

    # Output results
    println("True Weights: $true_weights")
    println("Estimated Weights: $(glm.theta)")

    # Plot cost convergence
    plot(glm.cost_history, title="Cost Convergence", xlabel="Iterations", ylabel="Cost", legend=false)
end

main()

Can you fix the code so that it keeps its structure but runs?

Thank you

Hi! After playing around with it for a bit, I am not sure you can define a function other than a constructor inside a struct. If you want a function to be linked to the struct GLM, you can define it independently and require that its argument be of type GLM.

Something like this should do the trick.

using Distributions
using Plots
using LinearAlgebra
using Random

function generate_data(n_samples, n_features, noise=0.1)
    Random.seed!(42) # For reproducibility
    X = rand(n_samples, n_features) # Features matrix
    X = hcat(ones(n_samples), X) # Add bias term
    true_weights = randn(n_features + 1) # True weights (including bias)
    y = X * true_weights + noise * randn(n_samples) # Outputs with noise
    return X, y, true_weights
end

mutable struct GLM
    learning_input::Matrix{Float64}
    learning_output::Vector{Float64}
    theta::Vector{Float64}
    learning_rate::Float64
    max_iter::Int64
    convergence::Float64
    cost_history::Vector{Float64}

    # Constructor
    function GLM(learning_input::Matrix{Float64}, learning_output::Vector{Float64};
        learning_rate::Float64 = 0.1, max_iter::Int64 = 100, convergence::Float64 = 1e-6)
        @assert size(learning_input, 1) == length(learning_output) "Inputs and outputs sizes do not match"
        n_features = size(learning_input, 2)
        new(learning_input, learning_output, zeros(n_features), learning_rate, max_iter, convergence, Float64[])
    end
end

# Hypothesis function
function hypothesis(model::GLM)
    return model.learning_input * model.theta
end

# Batch gradient descent
function train!(model::GLM)
    epoch = size(model.learning_input, 1)

    for i in 1:model.max_iter
        prediction = hypothesis(model)
        error = prediction - model.learning_output
        gradient = (model.learning_input' * error) / epoch
        model.theta -= model.learning_rate * gradient
        mean_squared_error = sqrt(sum(error.^2) / (2 * (epoch-1)))
        push!(model.cost_history, mean_squared_error)

        # Convergence checks
        if mean_squared_error < model.convergence
            break
        elseif i > 1 && abs(model.cost_history[end] - model.cost_history[end-1]) < model.convergence
            break
        end
    end
end

function main()
    # Parameters
    n_samples = 100
    n_features = 3
    noise = 0.2

    # Generate synthetic data
    X, y, true_weights = generate_data(n_samples, n_features, noise)

    # Initialize and train GLM
    glm = GLM(X, y)
    train!(glm)

    # Output results
    println("True Weights: $true_weights")
    println("Estimated Weights: $(glm.theta)")

    # Plot cost convergence
    plot(glm.cost_history, title="Cost Convergence", xlabel="Iterations", ylabel="Cost", legend=false)
end

main()


Format your code in a fenced code block between triple backticks; it's an extended Markdown syntax supported on forums like Discourse.
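For example, pasting one of the functions above into the post editor like this renders it as a highlighted block (the julia after the opening backticks is optional, but it enables syntax highlighting):

```julia
function hypothesis(model::GLM)
    return model.learning_input * model.theta
end
```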

self/this in member methods is an object-oriented practice. Julia is not object-oriented; its types do not encapsulate methods like this.
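In Julia, functions live outside the type and are attached to it by dispatching on the argument type, which is why glm.train!() fails while train!(glm) works. A minimal self-contained sketch of the same idea (Point and norm2 are made-up names, purely for illustration):

struct Point
    x::Float64
    y::Float64
end

# Defined outside the struct; the ::Point annotation is what links it to the type
norm2(p::Point) = sqrt(p.x^2 + p.y^2)

p = Point(3.0, 4.0)
println(norm2(p))   # 5.0 -- called as a plain function
# p.norm2()         # errors: type Point has no field norm2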

Thank you!