MethodError: no method matching (::GeometricFlux.var"#adjacency_matrix#76")

Hi. This is essentially the same problem as the GeometricFlux.jl issue "Method error when training GCNConv with FeaturedGraph input" (Issue #76 · FluxML/GeometricFlux.jl · GitHub). I replied there but have not received an answer.

The problem is that I am not able to differentiate a graph convolutional model with FeaturedGraph inputs; Zygote returns the following error:

MethodError: no method matching (::GeometricFlux.var"#adjacency_matrix#76")(::FeaturedGraph{Array{Float32,2},Array{Float32,2},Array{Float64,2},Array{Float64,1}})
Closest candidates are:
  adjacency_matrix(!Matched::AbstractArray{T,2} where T) at C:\Users\Reza\.juliapro\JuliaPro_v1.4.1-1\packages\GeometricFlux\k4atN\src\operations\linalg.jl:6
  adjacency_matrix(!Matched::AbstractArray{T,2} where T, !Matched::DataType) at C:\Users\Reza\.juliapro\JuliaPro_v1.4.1-1\packages\GeometricFlux\k4atN\src\operations\linalg.jl:6
_pullback at grad.jl:8 [inlined]
GCNConv at conv.jl:55 [inlined]
_pullback(::Zygote.Context, ::GCNConv{Float32,typeof(identity),FeaturedGraph{Array{Float64,2},Array{Float64,2},Array{Float64,2},Array{Float64,1}}}, ::FeaturedGraph{Array{Float32,2},Array{Float32,2},Array{Float64,2},Array{Float64,1}}) at interface2.jl:0
applychain at basic.jl:36 [inlined]
_pullback(::Zygote.Context, ::typeof(Flux.applychain), ::Tuple{GCNConv{Float32,typeof(identity),FeaturedGraph{Array{Float64,2},Array{Float64,2},Array{Float64,2},Array{Float64,1}}},GCNConv{Float32,typeof(identity),FeaturedGraph{Array{Float64,2},Array{Float64,2},Array{Float64,2},Array{Float64,1}}},var"#26979#26983",var"#26980#26984",Dense{typeof(identity),Array{Float32,2},Array{Float32,1}},Dense{typeof(identity),Array{Float32,2},Array{Float32,1}},var"#26981#26985",var"#26982#26986"}, ::FeaturedGraph{Array{Float32,2},Array{Float32,2},Array{Float64,2},Array{Float64,1}}) at interface2.jl:0
Chain at basic.jl:38 [inlined]
_pullback(::Zygote.Context, ::Chain{Tuple{GCNConv{Float32,typeof(identity),FeaturedGraph{Array{Float64,2},Array{Float64,2},Array{Float64,2},Array{Float64,1}}},GCNConv{Float32,typeof(identity),FeaturedGraph{Array{Float64,2},Array{Float64,2},Array{Float64,2},Array{Float64,1}}},var"#26979#26983",var"#26980#26984",Dense{typeof(identity),Array{Float32,2},Array{Float32,1}},Dense{typeof(identity),Array{Float32,2},Array{Float32,1}},var"#26981#26985",var"#26982#26986"}}, ::FeaturedGraph{Array{Float32,2},Array{Float32,2},Array{Float64,2},Array{Float64,1}}) at interface2.jl:0
#26987 at none:0 [inlined]
_pullback(::Zygote.Context, ::var"#26987#26988", ::Int64) at interface2.jl:0
MappingRF at reduce.jl:90 [inlined]
_foldl_impl at reduce.jl:55 [inlined]
_pullback(::Zygote.Context, ::typeof(Base._foldl_impl), ::Base.MappingRF{var"#26987#26988",Base.BottomRF{typeof(Base.add_sum)}}, ::Base._InitialValue, ::UnitRange{Int64}) at interface2.jl:0
foldl_impl at reduce.jl:45 [inlined]
_pullback(::Zygote.Context, ::typeof(Base.foldl_impl), ::Base.MappingRF{var"#26987#26988",Base.BottomRF{typeof(Base.add_sum)}}, ::NamedTuple{(),Tuple{}}, ::UnitRange{Int64}) at interface2.jl:0
mapfoldl_impl at reduce.jl:41 [inlined]
#mapfoldl#189 at reduce.jl:157 [inlined]
mapfoldl at reduce.jl:157 [inlined]
#mapreduce#193 at reduce.jl:283 [inlined]
mapreduce at reduce.jl:283 [inlined]
sum at reduce.jl:486 [inlined]
sum at reduce.jl:503 [inlined]
_pullback(::Zygote.Context, ::typeof(sum), ::Base.Generator{UnitRange{Int64},var"#26987#26988"}) at interface2.jl:0
my_loss_fg at train_neural_diving.jl:99 [inlined]
_pullback(::Zygote.Context, ::typeof(my_loss_fg)) at interface2.jl:0
#26989 at train_neural_diving.jl:104 [inlined]
_pullback(::Zygote.Context, ::var"#26989#26990", ::FeaturedGraph{Array{Float32,2},Array{Float32,2},Array{Float64,2},Array{Float64,1}}) at interface2.jl:0
_pullback(::Function, ::FeaturedGraph{Array{Float32,2},Array{Float32,2},Array{Float64,2},Array{Float64,1}}) at interface.jl:38
pullback(::Function, ::FeaturedGraph{Array{Float32,2},Array{Float32,2},Array{Float64,2},Array{Float64,1}}) at interface.jl:44
gradient(::Function, ::FeaturedGraph{Array{Float32,2},Array{Float32,2},Array{Float64,2},Array{Float64,1}}) at interface.jl:53
top-level scope at train_neural_diving.jl:104
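
For context, here is a minimal sketch of the kind of setup that produces this error. My actual model and loss (my_loss_fg in train_neural_diving.jl) are larger and sum over several graphs, so the graph, feature sizes, and layer widths below are illustrative only, and the constructors are written from memory against GeometricFlux v0.6:

using Flux, GeometricFlux, Zygote

# Illustrative data; my real graphs are built elsewhere in train_neural_diving.jl
adj = Float64[0 1 1; 1 0 1; 1 1 0]          # toy adjacency matrix
X   = rand(Float32, 4, 3)                    # 4 node features for each of 3 nodes
fg  = FeaturedGraph(adj, X)                  # graph plus node features

model = Chain(GCNConv(adj, 4=>8),            # graph conv layers
              GCNConv(adj, 8=>2),
              g -> node_feature(g),          # extract the node feature matrix
              Dense(2, 1))

my_loss_fg(fg) = sum(model(fg))              # scalar loss over a FeaturedGraph input

Zygote.gradient(fg -> my_loss_fg(fg), fg)    # the MethodError above is thrown here

My package environment (Pkg.status) is: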

Status `C:\Users\Reza\Documents\GitHub\MIP_Learning\Project.toml`
  [fbb218c0] BSON v0.2.6
  [052768ef] CUDA v1.2.1
  [2b5f629d] DiffEqBase v6.41.3
  [aae7a2af] DiffEqFlux v1.18.0
  [0c46a032] DifferentialEquations v6.15.0
  [587475ba] Flux v0.11.0
  [38e38edf] GLM v1.3.9
  [7e08b658] GeometricFlux v0.6.1
  [2e9cd046] Gurobi v0.8.1
  [82e4d734] ImageIO v0.3.0
  [6218d12a] ImageMagick v1.1.5
  [916415d5] Images v0.22.4
  [c601a237] Interact v0.10.3
  [4138dd39] JLD v0.10.0
  [033835bb] JLD2 v0.1.14
  [4076af6c] JuMP v0.21.3
  [093fc24a] LightGraphs v1.3.3
  [b8f27783] MathOptInterface v0.9.14
  [fdba3010] MathProgBase v0.7.8
  [a4795742] NLPModels v0.12.4
  [792afdf1] NLPModelsJuMP v0.6.3
  [c36e90e8] PowerModels v0.17.2
  [438e738f] PyCall v1.91.4
  [dca85d43] QuartzImageIO v0.7.2
  [82193955] SCIP v0.9.6
  [789caeaf] StochasticDiffEq v6.25.0
  [9f7883ad] Tracker v0.2.8
  [98b73d46] Trebuchet v0.2.1
  [e88e6eb3] Zygote v0.5.4
  [37e2e46d] LinearAlgebra
  [10745b16] Statistics

I would appreciate any feedback.