Solving a semilinear ODE without using automatic differentiation

I have a semilinear ODE whose time-dependent linear part (the implicit part) is implemented as the in-place function f1!.
So instead of letting the solver build its own Jacobian for the implicit stage system (I - γΔt * J₁) * u_{n+1} = rhs, I want to solve that system with a matrix-free Krylov method in which the action J₁ * v is just an application of f1!, i.e. f1!(res, v, p, t), since f1! is linear in the state.

But when I solve it using SplitODEProblem, it errors while trying to inject Duals into my function f1!, so it seems the solver is still performing automatic differentiation.

My questions:

  1. How can I tell the solver to use my custom JVP / Krylov operator without AD?
  2. Is using FunctionOperator the recommended way to provide a matrix-free implicit linear part in SplitODEProblem (see the sketch just below this list)? I know DiffEq specializes when the implicit linear operator is constant, but is there any advantage in the case of a time-dependent linear map?
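
To make question 2 concrete, this is roughly the construction I have in mind. It is only a sketch: I am guessing the in-place calling convention op(v, u, p, t) and the islinear/isconstant keywords from the SciMLOperators docs, and I don't know whether KenCarp4 would actually use such an operator without AD (which is what I am asking).

using SciMLOperators

# Wrap my apply_f! (defined below) in the assumed in-place form op(v, u, p, t)
lin_action!(dP, P, p, t) = apply_f!(dP, P, nothing, p, t)

L1op = FunctionOperator(lin_action!, copy(P), copy(P);
                        islinear   = true,   # f1 is linear in the state
                        isconstant = false)  # but its coefficients depend on t

# Use the operator as the implicit part of the split problem
prob_op = SplitODEProblem(L1op, apply_f2!, copy(P), (0.0, tfinal), p)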

Any guidance or examples would be greatly appreciated. Thanks in advance!

# f1!: the linear (stiff) part of the split RHS, treated implicitly
f1!(dP, P, p, t)        = apply_f!(dP, P, nothing, p, t)
# jvp_f1!: f1 is linear in the state, so J₁*v is just f1 applied to v
jvp_f1!(Jv, v, P, p, t) = apply_f!(Jv, v, nothing, p, t)

f = SplitFunction(f1!, apply_f2!; jvp = jvp_f1!)
prob = SplitODEProblem(f, copy(P), (0.0, tfinal), p)
sol = solve(prob, KenCarp4(linsolve = LS.KrylovJL_GMRES(), concrete_jac = false))

julia> sol = solve(prob, KenCarp4(linsolve = LS.KrylovJL_GMRES(), concrete_jac = false))
ERROR: MethodError: no method matching Float64(::ForwardDiff.Dual{ForwardDiff.Tag{DiffEqBase.OrdinaryDiffEqTag, Float64}, Float64, 1})
The type `Float64` exists, but no method is defined for this combination of argument types when trying to construct it.

Closest candidates are:
  (::Type{T})(::Real, ::RoundingMode) where T<:AbstractFloat
   @ Base rounding.jl:265
  (::Type{T})(::T) where T<:Number
   @ Core boot.jl:965
  Float64(::Float32)
   @ Base float.jl:345
  ...

Stacktrace:
  [1] convert(::Type{Float64}, x::ForwardDiff.Dual{ForwardDiff.Tag{DiffEqBase.OrdinaryDiffEqTag, Float64}, Float64, 1})
    @ Base ./number.jl:7
  [2] fill!
    @ ./array.jl:328 [inlined]
  [3] compute_poisson_rhs!(b::Vector{…}, Py::OffsetArray{…}, Px::OffsetArray{…}, Pz::OffsetArray{…}, p::Tuple{…})
    @ Main ~/Documents/FerroElectric/src/poisson_H1.jl:227
  [4] apply_f!(f1_P::Vector{…}, P::Vector{…}, u::Nothing, p::@NamedTuple{…}, t::Float64)
    @ Main ~/Documents/FerroElectric/scripts/implicit_linear_op.jl:31
  [5] f1!(dP::Vector{…}, P::Vector{…}, p::@NamedTuple{…}, t::Float64)
    @ Main ~/Documents/FerroElectric/scripts/implicit_linear_op.jl:344
  [6] (::ODEFunction{…})(::Vector{…}, ::Vararg{…})
    @ SciMLBase ~/.julia/packages/SciMLBase/wfZCo/src/scimlfunctions.jl:2573
  [7] (::SplitFunction{…})(du::Vector{…}, u::Vector{…}, p::@NamedTuple{…}, t::Float64)
    @ SciMLBase ~/.julia/packages/SciMLBase/wfZCo/src/scimlfunctions.jl:2609
  [8] compute_ydual_twoarg(::SplitFunction{…}, ::Vector{…}, ::DifferentiationInterfaceForwardDiffExt.ForwardDiffTwoArgPushforwardPrep{…}, ::Vector{…}, ::Tuple{…}, ::DifferentiationInterface.ConstantOrCache{…}, ::DifferentiationInterface.Constant{…})
    @ DifferentiationInterfaceForwardDiffExt ~/.julia/packages/DifferentiationInterface/cZ1tI/ext/DifferentiationInterfaceForwardDiffExt/twoarg.jl:59
  [9] pushforward!(::SplitFunction{…}, ::Vector{…}, ::Tuple{…}, ::DifferentiationInterfaceForwardDiffExt.ForwardDiffTwoArgPushforwardPrep{…}, ::ADTypes.AutoForwardDiff{…}, ::Vector{…}, ::Tuple{…}, ::DifferentiationInterface.ConstantOrCache{…}, ::DifferentiationInterface.Constant{…})
    @ DifferentiationInterfaceForwardDiffExt ~/.julia/packages/DifferentiationInterface/cZ1tI/ext/DifferentiationInterfaceForwardDiffExt/twoarg.jl:122
 [10] (::OrdinaryDiffEqDifferentiation.var"#prepare_jvp##0#prepare_jvp##1"{…})(Jv::Vector{…}, v::Vector{…}, u::Vector{…}, p::@NamedTuple{…}, t::Float64)
    @ OrdinaryDiffEqDifferentiation ~/.julia/packages/OrdinaryDiffEqDifferentiation/DE4z5/src/operators.jl:65
 [11] mul!(Jv::Vector{Float64}, J::OrdinaryDiffEqDifferentiation.JVPCache{Float64}, v::Vector{Float64})
    @ OrdinaryDiffEqDifferentiation ~/.julia/packages/OrdinaryDiffEqDifferentiation/DE4z5/src/operators.jl:53
 [12] mul!(Y::Vector{…}, W::OrdinaryDiffEqDifferentiation.WOperator{…}, B::Vector{…})
    @ OrdinaryDiffEqDifferentiation ~/.julia/packages/OrdinaryDiffEqDifferentiation/DE4z5/src/derivative_utils.jl:424
 [13] gmres!(workspace::Krylov.GmresWorkspace{…}, A::OrdinaryDiffEqDifferentiation.WOperator{…}, b::Vector{…}; M::LinearSolve.InvPreconditioner{…}, N::LinearAlgebra.Diagonal{…}, ldiv::Bool, restart::Bool, reorthogonalization::Bool, atol::Float64, rtol::Float64, itmax::Int64, timemax::Float64, verbose::Int64, history::Bool, callback::Krylov.var"#krylov_solve!##242#krylov_solve!##243", iostream::Core.CoreSTDOUT)
    @ Krylov ~/.julia/packages/Krylov/XuaiP/src/gmres.jl:251
 [14] gmres!
    @ ~/.julia/packages/Krylov/XuaiP/src/gmres.jl:117 [inlined]
 [15] krylov_solve!
    @ ~/.julia/packages/Krylov/XuaiP/src/interface.jl:231 [inlined]
 [16] solve!(cache::LinearSolve.LinearCache{…}, alg::LinearSolve.KrylovJL{…}; kwargs::@Kwargs{…})
    @ LinearSolve ~/.julia/packages/LinearSolve/DcHxD/src/iterative_wrappers.jl:296
 [17] solve!
    @ ~/.julia/packages/LinearSolve/DcHxD/src/iterative_wrappers.jl:247 [inlined]
 [18] #solve!#12
    @ ~/.julia/packages/LinearSolve/DcHxD/src/common.jl:304 [inlined]
 [19] solve!
    @ ~/.julia/packages/LinearSolve/DcHxD/src/common.jl:303 [inlined]
 [20] #dolinsolve#2
    @ ~/.julia/packages/OrdinaryDiffEqDifferentiation/DE4z5/src/linsolve_utils.jl:30 [inlined]
 [21] dolinsolve
    @ ~/.julia/packages/OrdinaryDiffEqDifferentiation/DE4z5/src/linsolve_utils.jl:5 [inlined]
 [22] compute_step!(nlsolver::OrdinaryDiffEqNonlinearSolve.NLSolver{…}, integrator::OrdinaryDiffEqCore.ODEIntegrator{…}, γW::Float64)
    @ OrdinaryDiffEqNonlinearSolve ~/.julia/packages/OrdinaryDiffEqNonlinearSolve/FAeQm/src/newton.jl:256
 [23] nlsolve!(nlsolver::OrdinaryDiffEqNonlinearSolve.NLSolver{…}, integrator::OrdinaryDiffEqCore.ODEIntegrator{…}, cache::OrdinaryDiffEqSDIRK.KenCarp4Cache{…}, repeat_step::Bool)
    @ OrdinaryDiffEqNonlinearSolve ~/.julia/packages/OrdinaryDiffEqNonlinearSolve/FAeQm/src/nlsolve.jl:52
 [24] perform_step!(integrator::OrdinaryDiffEqCore.ODEIntegrator{…}, cache::OrdinaryDiffEqSDIRK.KenCarp4Cache{…}, repeat_step::Bool)
    @ OrdinaryDiffEqSDIRK ~/.julia/packages/OrdinaryDiffEqSDIRK/02wwV/src/kencarp_kvaerno_perform_step.jl:1009
 [25] perform_step!
    @ ~/.julia/packages/OrdinaryDiffEqSDIRK/02wwV/src/kencarp_kvaerno_perform_step.jl:957 [inlined]
 [26] solve!(integrator::OrdinaryDiffEqCore.ODEIntegrator{…})
    @ OrdinaryDiffEqCore ~/.julia/packages/OrdinaryDiffEqCore/zs1s7/src/solve.jl:620
 [27] #__solve#51
    @ ~/.julia/packages/OrdinaryDiffEqCore/zs1s7/src/solve.jl:7 [inlined]
 [28] __solve
    @ ~/.julia/packages/OrdinaryDiffEqCore/zs1s7/src/solve.jl:1 [inlined]
 [29] #solve_call#27
    @ ~/.julia/packages/DiffEqBase/qvEPa/src/solve.jl:670 [inlined]
 [30] solve_call(_prob::ODEProblem{…}, args::KenCarp4{…})
    @ DiffEqBase ~/.julia/packages/DiffEqBase/qvEPa/src/solve.jl:627
 [31] solve_up(prob::ODEProblem{…}, sensealg::Nothing, u0::Vector{…}, p::@NamedTuple{…}, args::KenCarp4{…}; kwargs::@Kwargs{})
    @ DiffEqBase ~/.julia/packages/DiffEqBase/qvEPa/src/solve.jl:1205
 [32] solve_up
    @ ~/.julia/packages/DiffEqBase/qvEPa/src/solve.jl:1183 [inlined]
 [33] solve(prob::ODEProblem{…}, args::KenCarp4{…}; sensealg::Nothing, u0::Nothing, p::Nothing, wrap::Val{…}, kwargs::@Kwargs{})
    @ DiffEqBase ~/.julia/packages/DiffEqBase/qvEPa/src/solve.jl:1096
 [34] solve(prob::ODEProblem{…}, args::KenCarp4{…})
    @ DiffEqBase ~/.julia/packages/DiffEqBase/qvEPa/src/solve.jl:1086
 [35] top-level scope
    @ ~/Documents/FerroElectric/scripts/implicit_linear_op.jl:362
Some type information was truncated. Use `show(err)` to see complete types.

sol = solve(prob, KenCarp4(linsolve = LS.KrylovJL_GMRES(), concrete_jac = false, autodiff = ADTypes.AutoFiniteDiff()))

Matrix-free Krylov can always be done with differencing, so you don’t need an operator for this.
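(That is, the Jacobian-vector product is approximated by directional differencing, roughly J₁ * v ≈ (f1(u + εv) - f1(u)) / ε, so each Krylov iteration costs about one extra evaluation of f1!.)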

Thank you for the answer, Chris.

But I’m wondering why KenCarp4() needs to do any numerical differentiation (AD or FD) at all when I’m providing it an analytical Jacobian-vector-product function.

As per my understanding, numerical differentiation is needed only for solving (I - γΔt * J₁) * u_{n+1} = rhs in the implicit stage, and that is exactly what the provided JVP covers. Is it used anywhere else in the integrator?
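
To make that concrete, the only Jacobian action I would expect the Krylov solve to need looks like this (just a sketch using my own jvp_f1! from above; W_mul!, γ and Δt are hypothetical names standing in for whatever the integrator uses at the current stage):

# Action of W = I - γ*Δt*J₁ on a vector v, using only the analytic JVP.
# One call per GMRES iteration, no AD or finite differencing involved.
function W_mul!(Wv, v, γ, Δt, P, p, t)
    jvp_f1!(Wv, v, P, p, t)      # Wv = J₁ * v  (P is the linearization point, unused since f1 is linear)
    @. Wv = v - γ * Δt * Wv      # Wv = (I - γΔt*J₁) * v
    return Wv
end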

Oh, I missed that. Open an issue. It might be skipped with the DI update.

Opened the issue here: SplitODEProblem is using numerical differentiation even when analytical jacobian vector product is provided with a Krylov solver · Issue #1109 · SciML/DifferentialEquations.jl · GitHub