# Benchmark: train a small MLP on the GPU with Flux to regress y = sin(x1 * x2).
using Flux
using CUDA

# 100k random 2-feature samples (2 × N, Flux's feature-major layout), moved to GPU.
data = randn(Float32, 2, 100_000) |> gpu
# Target: sin of the product of the two features, reshaped to a 1 × N row matrix
# so it matches the model's output shape.
y = reshape(sin.(data[1, :] .* data[2, :]), (1, size(data, 2))) |> gpu

# Seven hidden relu layers of width 10, linear scalar output.
model = Chain(
    Dense(2, 10, relu),
    Dense(10, 10, relu),
    Dense(10, 10, relu),
    Dense(10, 10, relu),
    Dense(10, 10, relu),
    Dense(10, 10, relu),
    Dense(10, 10, relu),
    Dense(10, 1),
) |> gpu

opt = ADAM(0.001, (0.9, 0.999))

# MSE against the model's prediction; closes over the global `model`.
loss(x, y) = Flux.Losses.mse(model(x), y)
ps = Flux.params(model)

# The arrays are already on the GPU, so the DataLoader iterates GPU batches
# directly. Do NOT pipe the DataLoader itself through `gpu` — it is an
# iterator, not a layer.
dl = Flux.DataLoader((data, y), batchsize=500, shuffle=true)

# Explicit epoch loop replaces the deprecated `Flux.@epochs` macro.
# The throttled callback prints the full-dataset loss at most every 10 s.
for epoch in 1:100
    Flux.train!(loss, ps, dl, opt; cb = Flux.throttle(() -> @show(loss(data, y)), 10))
end
def test_tf():
    """Train a small Keras MLP on the GPU to regress y = sin(x1 * x2).

    Mirrors the Flux benchmark: seven hidden relu layers of width 10, a
    linear scalar output, Adam(1e-3), MSE loss, 100 epochs, batch size 500
    on 100k random 2-feature samples.
    """
    # Imports are local so the module can be loaded without TensorFlow installed.
    import tensorflow as tf
    import numpy as np
    from tensorflow import keras

    # tf.config.experimental.set_visible_devices(gpu[0], 'GPU')
    # Pin all ops in this scope to the first GPU.
    with tf.device("/gpu:0"):
        model = tf.keras.Sequential([
            keras.layers.Dense(units=10, activation='relu', input_shape=[2]),
            keras.layers.Dense(units=10, activation='relu'),
            keras.layers.Dense(units=10, activation='relu'),
            keras.layers.Dense(units=10, activation='relu'),
            keras.layers.Dense(units=10, activation='relu'),
            keras.layers.Dense(units=10, activation='relu'),
            keras.layers.Dense(units=10, activation='relu'),
            keras.layers.Dense(units=1),
        ])
        model.compile(optimizer=keras.optimizers.Adam(1e-3), loss="mean_squared_error")

        # Same synthetic regression task as the Flux version (sample-major layout).
        xs = np.random.randn(100000, 2).astype(np.float32)
        ys = np.sin(xs[:, 0] * xs[:, 1]).astype(np.float32)
        model.fit(xs, ys, epochs=100, batch_size=500)
# Entry point: time the full 100-epoch TensorFlow run and report the mean
# wall-clock time per epoch. `__name__`/"__main__" fixes the broken
# `if name == "main"` guard, which would raise NameError (or never fire).
if __name__ == "__main__":
    import time

    t0 = time.time()
    test_tf()
    # 100 epochs were run, so divide total elapsed time by 100.
    print("average time of epoch is {}".format((time.time() - t0) / 100))