AttributeError when calling a Python method using PyCall

I can run the following basic example script from the TensorFlow documentation without a hitch in Python.

import tensorflow as tf
from tensorflow import keras

# Load the dataset.
fashion_mnist = keras.datasets.fashion_mnist
(trn_images, trn_labels), (test_images, test_labels) = fashion_mnist.load_data()

train_images = trn_images[:50000]
train_labels = trn_labels[:50000]
valid_images = trn_images[50000:]
valid_labels = trn_labels[50000:]

# Define class names
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
               'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']

# Normalize the dataset.
train_images = train_images / 255.0
test_images = test_images / 255.0

# Construct the model
model = keras.Sequential([
    keras.layers.Flatten(input_shape=(28, 28)),
    keras.layers.Dense(128, activation=tf.nn.relu),
    keras.layers.Dense(10, activation=tf.nn.softmax)
])

# Compile the model.
model.compile(optimizer=tf.train.AdamOptimizer(),
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
# Train the model
model.fit(train_images, train_labels, epochs=5)

But when I try to run the same example in Julia via PyCall, using the script below,

using PyCall

@pyimport tensorflow as tf

# Load the dataset
fashion_mnist = tf.keras[:datasets][:fashion_mnist]
(trn_images, trn_labels), (test_images, test_labels) = fashion_mnist[:load_data]()

# Divide train dataset as train and validation dataset.
train_images = trn_images[1:50000, :, :]
train_labels = trn_labels[1:50000]
valid_images = trn_images[50001:end, :, :]
valid_labels = trn_labels[50001:end]

# Define class names
class_names = ["T-shirt/top", "Trouser", "Pullover", "Dress", "Coat",
               "Sandal", "Shirt", "Sneaker", "Bag", "Ankle boot"]

# Normalize the dataset.
train_images = train_images / 255.0
test_images = test_images / 255.0

# Construct the model
model = tf.keras[:Sequential]([
    tf.keras[:layers][:Flatten](input_shape=(28, 28)),
    tf.keras[:layers][:Dense](128, activation=tf.nn[:relu]),
    tf.keras[:layers][:Dense](10, activation=tf.nn[:softmax])
])

# Compile the model.
model[:compile](optimizer=tf.train[:AdamOptimizer](),
                loss="sparse_categorical_crossentropy",
                metrics=["accuracy"])

# Train the model
model[:fit](train_images, train_labels, epochs=5)

an AttributeError saying that a 'bytearray' object has no attribute 'ndim' is thrown from the Python side:

ERROR: LoadError: PyError ($(Expr(:escape, :(ccall(#= /home/sari/.julia/packages/PyCall/0jMpb/src/pyfncall.jl:44 =# @pysym(:PyObject_Call), PyPtr, (PyPtr, PyPtr, PyPtr), o, pyargsptr, kw))))) <class 'AttributeError'>
AttributeError("'bytearray' object has no attribute 'ndim'",)
  File "/home/sari/anaconda3/lib/python3.6/site-packages/tensorflow/python/keras/engine/training.py", line 1509, in fit
    validation_split=validation_split)
  File "/home/sari/anaconda3/lib/python3.6/site-packages/tensorflow/python/keras/engine/training.py", line 993, in _standardize_user_data
    class_weight, batch_size)
  File "/home/sari/anaconda3/lib/python3.6/site-packages/tensorflow/python/keras/engine/training.py", line 1149, in _standardize_weights
    exception_prefix='target')
  File "/home/sari/anaconda3/lib/python3.6/site-packages/tensorflow/python/keras/engine/training_utils.py", line 277, in standardize_input_data
    data = [standardize_single_array(x) for x in data]
  File "/home/sari/anaconda3/lib/python3.6/site-packages/tensorflow/python/keras/engine/training_utils.py", line 277, in <listcomp>
    data = [standardize_single_array(x) for x in data]
  File "/home/sari/anaconda3/lib/python3.6/site-packages/tensorflow/python/keras/engine/training_utils.py", line 215, in standardize_single_array
    elif x.ndim == 1:

Stacktrace:
 [1] pyerr_check at /home/sari/.julia/packages/PyCall/0jMpb/src/exception.jl:60 [inlined]
 [2] pyerr_check at /home/sari/.julia/packages/PyCall/0jMpb/src/exception.jl:64 [inlined]
 [3] macro expansion at /home/sari/.julia/packages/PyCall/0jMpb/src/exception.jl:84 [inlined]
 [4] __pycall!(::PyObject, ::Ptr{PyCall.PyObject_struct}, ::PyObject, ::PyObject) at /home/sari/.julia/packages/PyCall/0jMpb/src/pyfncall.jl:44
 [5] _pycall!(::PyObject, ::PyObject, ::Tuple{Array{Float64,3},Array{UInt8,1}}, ::Int64, ::PyObject) at /home/sari/.julia/packages/PyCall/0jMpb/src/pyfncall.jl:22
 [6] _pycall!(::PyObject, ::PyObject, ::Tuple{Array{Float64,3},Array{UInt8,1}}, ::Base.Iterators.Pairs{Symbol,Int64,Tuple{Symbol},NamedTuple{(:epochs,),Tuple{Int64}}}) at /home/sari/.julia/packages/PyCall/0jMpb/src/pyfncall.jl:11
 [7] #call#89(::Base.Iterators.Pairs{Symbol,Int64,Tuple{Symbol},NamedTuple{(:epochs,),Tuple{Int64}}}, ::PyObject, ::Array{Float64,3}, ::Vararg{Any,N} where N) at /home/sari/.julia/packages/PyCall/0jMpb/src/pyfncall.jl:89
 [8] (::getfield(PyCall, Symbol("#kw#PyObject")))(::NamedTuple{(:epochs,),Tuple{Int64}}, ::PyObject, ::Array{Float64,3}, ::Vararg{Any,N}where N) at ./none:0
 [9] top-level scope at none:0
in expression starting at /home/sari/.julia/v0.6/FractalTools/example/worskpace.jl:36

The error is thrown when the line model[:fit](train_images, train_labels, epochs=5) is executed, and I think it stems from a type inconsistency for train_labels between Julia and Python. The type of the labels is numpy.ndarray with dtype=uint8 in Python but Array{UInt8, 1} in Julia, and I expected these two types to be compatible with each other.
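
A quick REPL check seems to confirm the mismatch: PyCall appears to convert a Julia Vector{UInt8} to a Python bytearray rather than a NumPy array (a minimal sketch; the Int.() widening at the end is just my guess at a workaround):

using PyCall

labels = UInt8[1, 2, 3]

# A Vector{UInt8} arrives on the Python side as a bytearray,
# which has no ndim attribute -- matching the error above.
println(PyObject(labels)[:__class__])        # PyObject <class 'bytearray'>

# Widening the element type yields a numpy.ndarray instead.
println(PyObject(Int.(labels))[:__class__])  # PyObject <class 'numpy.ndarray'>
println(PyObject(Int.(labels))[:ndim])       # 1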

Any ideas how to fix this problem?
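
Would widening the label element type before calling fit be the right approach? A hypothetical sketch of what I have in mind:

# Hypothetical workaround: widen the UInt8 labels so PyCall passes
# Keras a numpy.ndarray instead of a bytearray.
model[:fit](train_images, Int.(train_labels), epochs=5)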