I am following along with Make Your Own Neural Network by Tariq Rashid, in which you implement a small neural network by hand. This is for my own benefit; I know that if I were doing this for real I should just use Flux.jl or similar. I was able to replicate the part3_neural_network_mnist_backquery.ipynb notebook from the makeyourownneuralnetwork GitHub repo in Julia. However, I get only a modest performance improvement (about 2x) over the Python version, and I think it has something to do with the type stability of my code. Can someone take a look at the train!() function and help me understand how to make it type stable? (I've sketched my own guess at a fix below the MWE.)
Here is the MWE with dummy data:
# %%
using Random
using LogExpFunctions
using InteractiveUtils  # for @code_warntype when this is run as a script
# %%
rng = MersenneTwister(1234)
struct my_nn
    in_nodes::Int
    h_nodes::Int
    out_nodes::Int
    lr::Float32
    wih::Array{Float32}  # input -> hidden weights
    who::Array{Float32}  # hidden -> output weights
end
@views function train!(NN::my_nn, training_data, epochs)
    label = zeros(Float32, NN.out_nodes)
    num_data = size(training_data, 1)
    # preallocate buffers so the inner loop doesn't allocate fresh arrays
    hidden_inputs = zeros(Float32, NN.h_nodes)
    hidden_outputs = zeros(Float32, NN.h_nodes)
    final_inputs = zeros(Float32, NN.out_nodes)
    final_outputs = zeros(Float32, NN.out_nodes)
    output_errors = zeros(Float32, NN.out_nodes)
    hidden_errors = zeros(Float32, NN.h_nodes)
    for e in 1:epochs
        for nd in 1:num_data
            # set up data: first column is the label, the rest are the pixel inputs
            inputs = training_data[nd, 2:end]
            label .= 0.01f0
            label[Int(training_data[nd, 1] + 1)] = 0.99f0
            # train
            # calculate signals into hidden layer
            hidden_inputs .= NN.wih * inputs
            # calculate the signals emerging from hidden layer
            hidden_outputs .= logistic.(hidden_inputs)
            # calculate signals into final output layer
            final_inputs .= NN.who * hidden_outputs
            # calculate the signals emerging from final output layer
            final_outputs .= logistic.(final_inputs)
            # output layer error is the (target - actual)
            output_errors .= label .- final_outputs
            # hidden layer error is the output_errors, split by weights, recombined at hidden nodes
            hidden_errors .= NN.who' * output_errors
            # update the weights for the links between the hidden and output layers
            NN.who .+= NN.lr .* (output_errors .* final_outputs .* (1.0f0 .- final_outputs)) * hidden_outputs'
            # update the weights for the links between the input and hidden layers
            NN.wih .+= NN.lr .* (hidden_errors .* hidden_outputs .* (1.0f0 .- hidden_outputs)) * inputs'
        end
    end
end
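# (An aside on allocations: I noticed that `NN.wih * inputs` and the other
# matrix-vector products allocate a fresh vector on every iteration before
# `.=` copies the result into the preallocated buffer. I assume the in-place
# LinearAlgebra.mul!, e.g.
#     mul!(hidden_inputs, NN.wih, inputs)
# would avoid those allocations, but I don't know whether that or the type
# instability is the bigger problem.)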
# %%
# number of input, hidden and output nodes
input_nodes = 784
hidden_nodes = 200
output_nodes = 10
# learning rate
learning_rate = 0.1f0
wih = randn(rng, Float32, (hidden_nodes, input_nodes)) .* input_nodes^-0.5f0
who = randn(rng, Float32, (output_nodes, hidden_nodes)) .* hidden_nodes^-0.5f0
nn = my_nn(input_nodes, hidden_nodes, output_nodes, learning_rate, wih, who)
# %% dummy data
training_data = zeros(Float32, (60000, 785))
# %%
epochs = 5
@code_warntype train!(nn, training_data, epochs)
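Looking at the @code_warntype output, my best guess is that the culprit is the wih::Array{Float32} and who::Array{Float32} fields: Array{Float32} without the dimension parameter is an abstract type, so NN.wih and NN.who can't be inferred concretely inside train!(). Would parameterizing the struct (or just declaring the fields as Matrix{Float32}) be the right fix? Something like this (my_nn2 is just a placeholder name for the variant):

# A sketch of what I have in mind: the same struct, but with a concrete
# (parametric) matrix type so the compiler knows the field types exactly.
struct my_nn2{M<:AbstractMatrix{Float32}}
    in_nodes::Int
    h_nodes::Int
    out_nodes::Int
    lr::Float32
    wih::M  # input -> hidden weights
    who::M  # hidden -> output weights
end
nn2 = my_nn2(input_nodes, hidden_nodes, output_nodes, learning_rate, wih, who)

Is that the idiomatic way to do it, or am I barking up the wrong tree?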
Thank you so much for your help!