From fc75bad6a952540b64ddecd377a97c8bcad6aea2 Mon Sep 17 00:00:00 2001 From: Astitva Aggarwal Date: Thu, 30 Mar 2023 15:09:24 +0530 Subject: [PATCH 01/53] added additional loss against data for NNODE(still needs reviewing) --- src/ode_solve.jl | 48 +++++++++++++++++++++++++++++++++--------------- 1 file changed, 33 insertions(+), 15 deletions(-) diff --git a/src/ode_solve.jl b/src/ode_solve.jl index 11f7429a4..5556e211e 100644 --- a/src/ode_solve.jl +++ b/src/ode_solve.jl @@ -3,7 +3,7 @@ abstract type NeuralPDEAlgorithm <: DiffEqBase.AbstractODEAlgorithm end """ ```julia NNODE(chain, opt=OptimizationPolyalgorithms.PolyOpt(), init_params = nothing; - autodiff=false, batch=0, kwargs...) + autodiff=false, batch=0,additional_loss=nothing kwargs...) ``` Algorithm for solving ordinary differential equations using a neural network. This is a specialization @@ -24,6 +24,14 @@ of the physics-informed neural network which is used as a solver for a standard ## Keyword Arguments +* `additional_loss`: A function additional_loss(phi, θ) where phi are the neural network trial solutions, + θ are the weights of the neural network(s). +example: + ts=[t for t in 1:100] + (u_, t_) = (analytical_func(ts), ts) + function additional_loss(phi, θ) + return sum(sum(abs2, [phi(t, θ) for t in t_] .- u_)) / length(u_) + end * `autodiff`: The switch between automatic and numerical differentiation for the PDE operators. The reverse mode of the loss function is always automatic differentiation (via Zygote), this is only for the derivative @@ -63,7 +71,9 @@ is an accurate interpolation (up to the neural network training result). In addi Lagaris, Isaac E., Aristidis Likas, and Dimitrios I. Fotiadis. "Artificial neural networks for solving ordinary and partial differential equations." IEEE Transactions on Neural Networks 9, no. 5 (1998): 987-1000. """ -struct NNODE{C, O, P, B, K, S <: Union{Nothing, AbstractTrainingStrategy}} <: +struct NNODE{C, O, P, B, K, AL <: Union{Nothing, Function}, + S <: Union{Nothing, AbstractTrainingStrategy} + } <: NeuralPDEAlgorithm chain::C opt::O @@ -72,11 +82,12 @@ struct NNODE{C, O, P, B, K, S <: Union{Nothing, AbstractTrainingStrategy}} <: batch::B strategy::S kwargs::K + additional_loss::AL end function NNODE(chain, opt, init_params = nothing; strategy = nothing, - autodiff = false, batch = nothing, kwargs...) - NNODE(chain, opt, init_params, autodiff, batch, strategy, kwargs) + autodiff = false, batch = nothing, additional_loss = nothing, kwargs...) 
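+    # `additional_loss` defaults to `nothing`, so existing call sites keep working;
+    # when supplied it is stored on the struct and folded into the training loss at solve time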
+ NNODE(chain, opt, init_params, autodiff, batch, strategy, additional_loss, kwargs) end """ @@ -236,10 +247,10 @@ function inner_loss(phi::ODEPhi{C, T, U}, f, autodiff::Bool, t::AbstractVector, end """ -Representation of the loss function, paramtric on the training strategy `strategy` +Representation of the loss function, parametric on the training strategy `strategy` """ function generate_loss(strategy::QuadratureTraining, phi, f, autodiff::Bool, tspan, p, - batch) + batch, additional_loss = nothing) integrand(t::Number, θ) = abs2(inner_loss(phi, f, autodiff, t, θ, p)) integrand(ts, θ) = [abs2(inner_loss(phi, f, autodiff, t, θ, p)) for t in ts] @assert batch == 0 # not implemented @@ -250,11 +261,11 @@ function generate_loss(strategy::QuadratureTraining, phi, f, autodiff::Bool, tsp sol.u end - # Default this to ForwardDiff until Integrals.jl autodiff is sorted out - OptimizationFunction(loss, Optimization.AutoForwardDiff()) + return loss end -function generate_loss(strategy::GridTraining, phi, f, autodiff::Bool, tspan, p, batch) +function generate_loss(strategy::GridTraining, phi, f, autodiff::Bool, tspan, p, batch, + additional_loss = nothing) ts = tspan[1]:(strategy.dx):tspan[2] # sum(abs2,inner_loss(t,θ) for t in ts) but Zygote generators are broken @@ -266,11 +277,12 @@ function generate_loss(strategy::GridTraining, phi, f, autodiff::Bool, tspan, p, sum(abs2, [inner_loss(phi, f, autodiff, t, θ, p) for t in ts]) end end - optf = OptimizationFunction(loss, Optimization.AutoZygote()) + + return loss end function generate_loss(strategy::StochasticTraining, phi, f, autodiff::Bool, tspan, p, - batch) + batch, additional_loss = nothing) function loss(θ, _) ts = adapt(parameterless_type(θ), [(tspan[2] - tspan[1]) * rand() + tspan[1] for i in 1:(strategy.points)]) @@ -281,7 +293,8 @@ function generate_loss(strategy::StochasticTraining, phi, f, autodiff::Bool, tsp sum(abs2, [inner_loss(phi, f, autodiff, t, θ, p) for t in ts]) end end - optf = OptimizationFunction(loss, Optimization.AutoZygote()) + + return loss end function generate_loss(strategy::WeightedIntervalTraining, phi, f, autodiff::Bool, tspan, p, @@ -312,10 +325,11 @@ function generate_loss(strategy::WeightedIntervalTraining, phi, f, autodiff::Boo sum(abs2, [inner_loss(phi, f, autodiff, t, θ, p) for t in ts]) end end - optf = OptimizationFunction(loss, Optimization.AutoZygote()) + return loss end -function generate_loss(strategy::QuasiRandomTraining, phi, f, autodiff::Bool, tspan) +function generate_loss(strategy::QuasiRandomTraining, phi, f, autodiff::Bool, tspan, + additional_loss = nothing) error("QuasiRandomTraining is not supported by NNODE since it's for high dimensional spaces only. Use StochasticTraining instead.") end @@ -407,7 +421,11 @@ function DiffEqBase.__solve(prob::DiffEqBase.AbstractODEProblem, alg.batch end - optf = generate_loss(strategy, phi, f, autodiff::Bool, tspan, p, batch) + # additional loss + additional_loss = alg.additional_loss + + optf = generate_loss(strategy, phi, f, autodiff::Bool, tspan, p, batch, + additional_loss) iteration = 0 callback = function (p, l) From 4f76ee3bb7c81722804ea0c54f5bef9122e93fe2 Mon Sep 17 00:00:00 2001 From: Astitva Aggarwal Date: Sat, 1 Apr 2023 13:24:56 +0530 Subject: [PATCH 02/53] Added additional Loss function Feature for NNODE (user defined function), also now has common OptimizationFunction object definition(instead for each TrainingStrategy). 
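
Sketch of the intended call pattern (mirrors the new tests; the chain,
optimizer, and analytic data below are illustrative placeholders, not part
of this diff):

```julia
using NeuralPDE, Flux
import OptimizationOptimisers

ts = collect(0.0f0:0.01f0:1.0f0)
(u_, t_) = ((1 / (2pi)) .* sin.(2pi .* ts), ts)   # known data to fit against

# extra data-fitting term, added on top of the PDE residual loss
function additional_loss(phi, θ)
    return sum(sum(abs2, [phi(t, θ) for t in t_] .- u_)) / length(u_)
end

chain = Flux.Chain(Flux.Dense(1, 5, σ), Flux.Dense(5, 1))
opt = OptimizationOptimisers.Adam(0.1, (0.9, 0.95))
alg = NeuralPDE.NNODE(chain, opt, strategy = GridTraining(0.01),
                      additional_loss = additional_loss)
```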
--- src/ode_solve.jl | 38 ++++++++++++++++++++++++++++---------- 1 file changed, 28 insertions(+), 10 deletions(-) diff --git a/src/ode_solve.jl b/src/ode_solve.jl index 5556e211e..f28f18b54 100644 --- a/src/ode_solve.jl +++ b/src/ode_solve.jl @@ -81,8 +81,8 @@ struct NNODE{C, O, P, B, K, AL <: Union{Nothing, Function}, autodiff::Bool batch::B strategy::S - kwargs::K additional_loss::AL + kwargs::K end function NNODE(chain, opt, init_params = nothing; strategy = nothing, @@ -250,7 +250,7 @@ end Representation of the loss function, parametric on the training strategy `strategy` """ function generate_loss(strategy::QuadratureTraining, phi, f, autodiff::Bool, tspan, p, - batch, additional_loss = nothing) + batch) integrand(t::Number, θ) = abs2(inner_loss(phi, f, autodiff, t, θ, p)) integrand(ts, θ) = [abs2(inner_loss(phi, f, autodiff, t, θ, p)) for t in ts] @assert batch == 0 # not implemented @@ -264,12 +264,10 @@ function generate_loss(strategy::QuadratureTraining, phi, f, autodiff::Bool, tsp return loss end -function generate_loss(strategy::GridTraining, phi, f, autodiff::Bool, tspan, p, batch, - additional_loss = nothing) +function generate_loss(strategy::GridTraining, phi, f, autodiff::Bool, tspan, p, batch) ts = tspan[1]:(strategy.dx):tspan[2] # sum(abs2,inner_loss(t,θ) for t in ts) but Zygote generators are broken - function loss(θ, _) if batch sum(abs2, inner_loss(phi, f, autodiff, ts, θ, p)) @@ -281,8 +279,7 @@ function generate_loss(strategy::GridTraining, phi, f, autodiff::Bool, tspan, p, return loss end -function generate_loss(strategy::StochasticTraining, phi, f, autodiff::Bool, tspan, p, - batch, additional_loss = nothing) +function generate_loss(strategy::StochasticTraining, phi, f, autodiff::Bool, tspan, p,batch, additional_loss = nothing) function loss(θ, _) ts = adapt(parameterless_type(θ), [(tspan[2] - tspan[1]) * rand() + tspan[1] for i in 1:(strategy.points)]) @@ -329,7 +326,7 @@ function generate_loss(strategy::WeightedIntervalTraining, phi, f, autodiff::Boo end function generate_loss(strategy::QuasiRandomTraining, phi, f, autodiff::Bool, tspan, - additional_loss = nothing) + additional_loss) error("QuasiRandomTraining is not supported by NNODE since it's for high dimensional spaces only. 
Use StochasticTraining instead.") end @@ -424,8 +421,29 @@ function DiffEqBase.__solve(prob::DiffEqBase.AbstractODEProblem, # additional loss additional_loss = alg.additional_loss - optf = generate_loss(strategy, phi, f, autodiff::Bool, tspan, p, batch, - additional_loss) + # Computes total_loss + function total_loss(θ, _) + L2_loss = generate_loss(strategy, phi, f, autodiff::Bool, tspan, p, batch)(θ, phi) + if !(additional_loss isa Nothing) + return additional_loss(phi, θ) + L2_loss + end + L2_loss + end + + # Choice of Optimization Algo for Training Strategies + opt_algo = if strategy isa QuadratureTraining + Optimization.AutoForwardDiff() + elseif strategy isa StochasticTraining + Optimization.AutoZygote() + else + # by default GridTraining choice of Optimization + # if adding new training algorithms we can extend this, + # if-elseif-else block for choices of optimization algos + Optimization.AutoZygote() + end + + # Creates OptimizationFunction Object from total_loss + optf = OptimizationFunction(total_loss, opt_algo) iteration = 0 callback = function (p, l) From a15f6eb333affeb0b86e68ed37361217a1fffc96 Mon Sep 17 00:00:00 2001 From: Astitva Aggarwal Date: Sun, 2 Apr 2023 02:46:18 +0530 Subject: [PATCH 03/53] Added Tests for additional_loss function feature Also updated docs(might need further editing) Future Scope: We can add weighted loss in NNODE,and even losses for Parameter Estimation for Inverse Problems. --- docs/src/manual/ode.md | 18 ++++++++ test/NNODE_tests.jl | 94 ++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 112 insertions(+) diff --git a/docs/src/manual/ode.md b/docs/src/manual/ode.md index 0cd20d406..b8fe44b92 100644 --- a/docs/src/manual/ode.md +++ b/docs/src/manual/ode.md @@ -2,4 +2,22 @@ ```@docs NNODE + +additional_loss as an argument for NNODE algorithm: +Any function which computes the additional_loss can be passed as an argument to NNODE algorithm call. + +example: +(u_, t_) = (u_analytical(sol.t), sol.t) +function additional_loss(phi, θ) + return sum(sum(abs2, [phi(t, θ) for t in t_] .- u_)) / length(u_) +end + +alg1 = NeuralPDE.NNODE(chain, opt, strategy=StochasticTraining(1000), additional_loss=additional_loss) + +Here we define the additional loss function additional_loss(phi, θ ), the function has two arguments: + +phi the trial solution +θ the parameters of neural networks + +Note:Refering to above example phi can only take in t as a scalar at a time and θ as parameters of the network. 
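+
+The algorithm object is then passed to solve as usual; for example (tolerances and iteration counts here are illustrative):
+
+sol = solve(prob, alg1, verbose = true, abstol = 1.0f-8, maxiters = 500)
+
+During training, the optimizer minimizes the training strategy's PDE residual loss plus additional_loss(phi, θ).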
``` diff --git a/test/NNODE_tests.jl b/test/NNODE_tests.jl index ffc6d6815..37747e6fe 100644 --- a/test/NNODE_tests.jl +++ b/test/NNODE_tests.jl @@ -228,3 +228,97 @@ alg = NeuralPDE.NNODE(chain, opt, autodiff = false, sol = solve(prob_oop, alg, verbose = true, maxiters = 100000, saveat = 0.01) @test abs(mean(sol) - mean(true_sol)) < 0.2 + +# Checking if additional_loss feature works for NNODE +linear = (u, p, t) -> cos(2pi * t) +linear_analytic = (u, p, t) -> (1 / (2pi)) * sin(2pi * t) +tspan = (0.0f0, 1.0f0) +dt = (tspan[2] - tspan[1]) / 99 +ts = collect(tspan[1]:dt:tspan[2]) +prob = ODEProblem(ODEFunction(linear, analytic = linear_analytic), 0.0f0, (0.0f0, 1.0f0)) +opt = OptimizationOptimisers.Adam(0.1, (0.9, 0.95)) + +# Analytical solution +u_analytical(x) = (1 / (2pi)) .* sin.(2pi .* x) + +# GridTraining (Flux Chain) +chain = Flux.Chain(Dense(1, 5, σ), Dense(5, 1)) + +(u_, t_) = (u_analytical(ts), ts) +function additional_loss(phi, θ) + return sum(sum(abs2, [phi(t, θ) for t in t_] .- u_)) / length(u_) +end + +alg1 = NeuralPDE.NNODE(chain, opt, strategy = GridTraining(0.01), + additional_loss = additional_loss) + +sol1 = solve(prob, alg1, verbose = true, abstol = 1.0f-8, maxiters = 500) +@test sol1.errors[:l2] < 0.5 + +# GridTraining (Lux Chain) +luxchain = Lux.Chain(Lux.Dense(1, 5, Lux.σ), Lux.Dense(5, 1)) + +(u_, t_) = (u_analytical(ts), ts) +function additional_loss(phi, θ) + return sum(sum(abs2, [phi(t, θ) for t in t_] .- u_)) / length(u_) +end + +alg1 = NeuralPDE.NNODE(luxchain, opt, strategy = GridTraining(0.01), + additional_loss = additional_loss) + +sol1 = solve(prob, alg1, verbose = true, abstol = 1.0f-8, maxiters = 500) +@test sol1.errors[:l2] < 0.5 + +# QuadratureTraining (Flux Chain) +chain = Flux.Chain(Dense(1, 5, σ), Dense(5, 1)) + +(u_, t_) = (u_analytical(ts), ts) +function additional_loss(phi, θ) + return sum(sum(abs2, [phi(t, θ) for t in t_] .- u_)) / length(u_) +end + +alg1 = NeuralPDE.NNODE(chain, opt, additional_loss = additional_loss) + +sol1 = solve(prob, alg1, verbose = true, abstol = 1.0f-10, maxiters = 200) +@test sol1.errors[:l2] < 0.5 + +# QuadratureTraining (Lux Chain) +luxchain = Lux.Chain(Lux.Dense(1, 5, Lux.σ), Lux.Dense(5, 1)) + +(u_, t_) = (u_analytical(ts), ts) +function additional_loss(phi, θ) + return sum(sum(abs2, [phi(t, θ) for t in t_] .- u_)) / length(u_) +end + +alg1 = NeuralPDE.NNODE(luxchain, opt, additional_loss = additional_loss) + +sol1 = solve(prob, alg1, verbose = true, abstol = 1.0f-10, maxiters = 200) +@test sol1.errors[:l2] < 0.5 + +# StochasticTraining(Flux Chain) +chain = Flux.Chain(Dense(1, 5, σ), Dense(5, 1)) + +(u_, t_) = (u_analytical(ts), ts) +function additional_loss(phi, θ) + return sum(sum(abs2, [phi(t, θ) for t in t_] .- u_)) / length(u_) +end + +alg1 = NeuralPDE.NNODE(chain, opt, strategy = StochasticTraining(1000), + additional_loss = additional_loss) + +sol1 = solve(prob, alg1, verbose = true, abstol = 1.0f-8, maxiters = 500) +@test sol1.errors[:l2] < 0.5 + +# StochasticTraining (Lux Chain) +luxchain = Lux.Chain(Lux.Dense(1, 5, Lux.σ), Lux.Dense(5, 1)) + +(u_, t_) = (u_analytical(ts), ts) +function additional_loss(phi, θ) + return sum(sum(abs2, [phi(t, θ) for t in t_] .- u_)) / length(u_) +end + +alg1 = NeuralPDE.NNODE(luxchain, opt, strategy = StochasticTraining(1000), + additional_loss = additional_loss) + +sol1 = solve(prob, alg1, verbose = true, abstol = 1.0f-8, maxiters = 500) +@test sol1.errors[:l2] < 0.5 \ No newline at end of file From 69ecc9fa9add6b85254645f17278aa4574342959 Mon Sep 17 00:00:00 2001 From: 
Astitva Aggarwal Date: Sun, 2 Apr 2023 03:34:08 +0530 Subject: [PATCH 04/53] formatted files --- test/NNODE_tests.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/NNODE_tests.jl b/test/NNODE_tests.jl index 37747e6fe..c04f3300b 100644 --- a/test/NNODE_tests.jl +++ b/test/NNODE_tests.jl @@ -321,4 +321,4 @@ alg1 = NeuralPDE.NNODE(luxchain, opt, strategy = StochasticTraining(1000), additional_loss = additional_loss) sol1 = solve(prob, alg1, verbose = true, abstol = 1.0f-8, maxiters = 500) -@test sol1.errors[:l2] < 0.5 \ No newline at end of file +@test sol1.errors[:l2] < 0.5 From 6508b352a2621bf19bb487439b36943bf37a4990 Mon Sep 17 00:00:00 2001 From: Astitva Aggarwal Date: Sun, 2 Apr 2023 14:04:53 +0530 Subject: [PATCH 05/53] docs --- docs/src/manual/ode.md | 18 ------------------ 1 file changed, 18 deletions(-) diff --git a/docs/src/manual/ode.md b/docs/src/manual/ode.md index b8fe44b92..0cd20d406 100644 --- a/docs/src/manual/ode.md +++ b/docs/src/manual/ode.md @@ -2,22 +2,4 @@ ```@docs NNODE - -additional_loss as an argument for NNODE algorithm: -Any function which computes the additional_loss can be passed as an argument to NNODE algorithm call. - -example: -(u_, t_) = (u_analytical(sol.t), sol.t) -function additional_loss(phi, θ) - return sum(sum(abs2, [phi(t, θ) for t in t_] .- u_)) / length(u_) -end - -alg1 = NeuralPDE.NNODE(chain, opt, strategy=StochasticTraining(1000), additional_loss=additional_loss) - -Here we define the additional loss function additional_loss(phi, θ ), the function has two arguments: - -phi the trial solution -θ the parameters of neural networks - -Note:Refering to above example phi can only take in t as a scalar at a time and θ as parameters of the network. ``` From 74af932fa79d625492dd29dddcb0f80184235900 Mon Sep 17 00:00:00 2001 From: Astitva Aggarwal Date: Sun, 2 Apr 2023 19:06:37 +0530 Subject: [PATCH 06/53] Docs for new NNODE Argument:additional_loss --- src/ode_solve.jl | 1 - 1 file changed, 1 deletion(-) diff --git a/src/ode_solve.jl b/src/ode_solve.jl index f28f18b54..f312f6075 100644 --- a/src/ode_solve.jl +++ b/src/ode_solve.jl @@ -23,7 +23,6 @@ of the physics-informed neural network which is used as a solver for a standard which thus uses the random initialization provided by the neural network library. ## Keyword Arguments - * `additional_loss`: A function additional_loss(phi, θ) where phi are the neural network trial solutions, θ are the weights of the neural network(s). 
example: From 9b3d24909c37321d0ade1d8fcc95f2a252c43b31 Mon Sep 17 00:00:00 2001 From: Astitva Aggarwal Date: Sun, 2 Apr 2023 22:42:21 +0530 Subject: [PATCH 07/53] fixed optimizations choice --- src/ode_solve.jl | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/ode_solve.jl b/src/ode_solve.jl index f312f6075..b2891aeb1 100644 --- a/src/ode_solve.jl +++ b/src/ode_solve.jl @@ -434,6 +434,8 @@ function DiffEqBase.__solve(prob::DiffEqBase.AbstractODEProblem, Optimization.AutoForwardDiff() elseif strategy isa StochasticTraining Optimization.AutoZygote() + elseif strategy isa WeightedIntervalTraining + Optimization.AutoZygote() else # by default GridTraining choice of Optimization # if adding new training algorithms we can extend this, From 4197cdb0595e5fa853741c15892233ed05016339 Mon Sep 17 00:00:00 2001 From: Astitva Aggarwal Date: Mon, 3 Apr 2023 20:18:48 +0530 Subject: [PATCH 08/53] rebased --- test/NNODE_tests.jl | 1 + 1 file changed, 1 insertion(+) diff --git a/test/NNODE_tests.jl b/test/NNODE_tests.jl index c04f3300b..1839f57c7 100644 --- a/test/NNODE_tests.jl +++ b/test/NNODE_tests.jl @@ -207,6 +207,7 @@ sol = solve(prob, NeuralPDE.NNODE(luxchain, opt; batch = true), verbose = true, abstol = 1.0f-8, dt = 1 / 5.0f0) @test sol.errors[:l2] < 0.5 +# WeightedIntervalTraining(Lux Chain) function f(u, p, t) [p[1] * u[1] - p[2] * u[1] * u[2], -p[3] * u[2] + p[4] * u[1] * u[2]] end From 68de9efb82068f1eecf6481678205c9b77559676 Mon Sep 17 00:00:00 2001 From: Astitva Aggarwal Date: Thu, 30 Mar 2023 15:09:24 +0530 Subject: [PATCH 09/53] added additional loss against data for NNODE(still needs reviewing) --- src/ode_solve.jl | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/src/ode_solve.jl b/src/ode_solve.jl index b2891aeb1..82beae17a 100644 --- a/src/ode_solve.jl +++ b/src/ode_solve.jl @@ -82,6 +82,7 @@ struct NNODE{C, O, P, B, K, AL <: Union{Nothing, Function}, strategy::S additional_loss::AL kwargs::K + additional_loss::AL end function NNODE(chain, opt, init_params = nothing; strategy = nothing, @@ -278,7 +279,9 @@ function generate_loss(strategy::GridTraining, phi, f, autodiff::Bool, tspan, p, return loss end -function generate_loss(strategy::StochasticTraining, phi, f, autodiff::Bool, tspan, p,batch, additional_loss = nothing) +function generate_loss(strategy::StochasticTraining, phi, f, autodiff::Bool, tspan, p, + batch) + # sum(abs2,inner_loss(t,θ) for t in ts) but Zygote generators are broken function loss(θ, _) ts = adapt(parameterless_type(θ), [(tspan[2] - tspan[1]) * rand() + tspan[1] for i in 1:(strategy.points)]) @@ -324,8 +327,7 @@ function generate_loss(strategy::WeightedIntervalTraining, phi, f, autodiff::Boo return loss end -function generate_loss(strategy::QuasiRandomTraining, phi, f, autodiff::Bool, tspan, - additional_loss) +function generate_loss(strategy::QuasiRandomTraining, phi, f, autodiff::Bool, tspan) error("QuasiRandomTraining is not supported by NNODE since it's for high dimensional spaces only. Use StochasticTraining instead.") end From b0d4718cb4696908481a971f16a6bdbadf31f637 Mon Sep 17 00:00:00 2001 From: Astitva Aggarwal Date: Sat, 1 Apr 2023 13:24:56 +0530 Subject: [PATCH 10/53] Added additional Loss function Feature for NNODE (user defined function), also now has common OptimizationFunction object definition(instead for each TrainingStrategy). 
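
The common construction this series settles on, condensed (names as in
src/ode_solve.jl; simplified from the diffs in this series):

```julia
# strategy-specific residual loss, plus the user's additional_loss when given
function total_loss(θ, _)
    L2_loss = generate_loss(strategy, phi, f, autodiff, tspan, p, batch)(θ, phi)
    if !(additional_loss isa Nothing)
        return additional_loss(phi, θ) + L2_loss
    end
    L2_loss
end

# AD backend choice depends on the training strategy
opt_algo = strategy isa QuadratureTraining ? Optimization.AutoForwardDiff() :
           Optimization.AutoZygote()

optf = OptimizationFunction(total_loss, opt_algo)
```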
--- src/ode_solve.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/ode_solve.jl b/src/ode_solve.jl index 82beae17a..c4c1e4176 100644 --- a/src/ode_solve.jl +++ b/src/ode_solve.jl @@ -80,9 +80,9 @@ struct NNODE{C, O, P, B, K, AL <: Union{Nothing, Function}, autodiff::Bool batch::B strategy::S - additional_loss::AL kwargs::K additional_loss::AL + kwargs::K end function NNODE(chain, opt, init_params = nothing; strategy = nothing, From 45c573ec6d71bc880846af0ec4cfad3abfd78c94 Mon Sep 17 00:00:00 2001 From: Astitva Aggarwal Date: Sun, 2 Apr 2023 02:46:18 +0530 Subject: [PATCH 11/53] Added Tests for additional_loss function feature Also updated docs(might need further editing) Future Scope: We can add weighted loss in NNODE,and even losses for Parameter Estimation for Inverse Problems. --- docs/src/manual/ode.md | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/docs/src/manual/ode.md b/docs/src/manual/ode.md index 0cd20d406..b8fe44b92 100644 --- a/docs/src/manual/ode.md +++ b/docs/src/manual/ode.md @@ -2,4 +2,22 @@ ```@docs NNODE + +additional_loss as an argument for NNODE algorithm: +Any function which computes the additional_loss can be passed as an argument to NNODE algorithm call. + +example: +(u_, t_) = (u_analytical(sol.t), sol.t) +function additional_loss(phi, θ) + return sum(sum(abs2, [phi(t, θ) for t in t_] .- u_)) / length(u_) +end + +alg1 = NeuralPDE.NNODE(chain, opt, strategy=StochasticTraining(1000), additional_loss=additional_loss) + +Here we define the additional loss function additional_loss(phi, θ ), the function has two arguments: + +phi the trial solution +θ the parameters of neural networks + +Note:Refering to above example phi can only take in t as a scalar at a time and θ as parameters of the network. ``` From 0d658d66d2884982534f6136903fb4c262788190 Mon Sep 17 00:00:00 2001 From: Astitva Aggarwal Date: Sun, 2 Apr 2023 14:04:53 +0530 Subject: [PATCH 12/53] docs --- docs/src/manual/ode.md | 18 ------------------ 1 file changed, 18 deletions(-) diff --git a/docs/src/manual/ode.md b/docs/src/manual/ode.md index b8fe44b92..0cd20d406 100644 --- a/docs/src/manual/ode.md +++ b/docs/src/manual/ode.md @@ -2,22 +2,4 @@ ```@docs NNODE - -additional_loss as an argument for NNODE algorithm: -Any function which computes the additional_loss can be passed as an argument to NNODE algorithm call. - -example: -(u_, t_) = (u_analytical(sol.t), sol.t) -function additional_loss(phi, θ) - return sum(sum(abs2, [phi(t, θ) for t in t_] .- u_)) / length(u_) -end - -alg1 = NeuralPDE.NNODE(chain, opt, strategy=StochasticTraining(1000), additional_loss=additional_loss) - -Here we define the additional loss function additional_loss(phi, θ ), the function has two arguments: - -phi the trial solution -θ the parameters of neural networks - -Note:Refering to above example phi can only take in t as a scalar at a time and θ as parameters of the network. 
``` From c810ee3ebec16103daf22ceb3ec3fdd1d56ff4ef Mon Sep 17 00:00:00 2001 From: Astitva Aggarwal Date: Sun, 2 Apr 2023 22:36:00 +0530 Subject: [PATCH 13/53] Mixed my PR with PR #635 by sdesai1287 --- src/ode_solve.jl | 34 ++++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/src/ode_solve.jl b/src/ode_solve.jl index c4c1e4176..af03198f3 100644 --- a/src/ode_solve.jl +++ b/src/ode_solve.jl @@ -325,8 +325,42 @@ function generate_loss(strategy::WeightedIntervalTraining, phi, f, autodiff::Boo end end return loss + optf = OptimizationFunction(loss, Optimization.AutoZygote()) end +function generate_loss(strategy::WeightedIntervalTraining, phi, f, autodiff::Bool, tspan, p, + batch) + minT = tspan[1] + maxT = tspan[2] + + weights = strategy.weights ./ sum(strategy.weights) + + N = length(weights) + samples = strategy.samples + + difference = (maxT - minT) / N + + data = Float64[] + for (index, item) in enumerate(weights) + temp_data = rand(1, trunc(Int, samples * item)) .* difference .+ minT .+ + ((index - 1) * difference) + data = append!(data, temp_data) + end + + ts = data + + function loss(θ, _) + if batch + sum(abs2, inner_loss(phi, f, autodiff, ts, θ, p)) + else + sum(abs2, [inner_loss(phi, f, autodiff, t, θ, p) for t in ts]) + end + end + + return loss +end + + function generate_loss(strategy::QuasiRandomTraining, phi, f, autodiff::Bool, tspan) error("QuasiRandomTraining is not supported by NNODE since it's for high dimensional spaces only. Use StochasticTraining instead.") end From b9a460629da1adbc998ab3c034cca31cec092d17 Mon Sep 17 00:00:00 2001 From: Astitva Aggarwal Date: Sun, 2 Apr 2023 22:42:21 +0530 Subject: [PATCH 14/53] fixed optimizations choice --- src/ode_solve.jl | 1 - 1 file changed, 1 deletion(-) diff --git a/src/ode_solve.jl b/src/ode_solve.jl index af03198f3..d5fafa096 100644 --- a/src/ode_solve.jl +++ b/src/ode_solve.jl @@ -325,7 +325,6 @@ function generate_loss(strategy::WeightedIntervalTraining, phi, f, autodiff::Boo end end return loss - optf = OptimizationFunction(loss, Optimization.AutoZygote()) end function generate_loss(strategy::WeightedIntervalTraining, phi, f, autodiff::Bool, tspan, p, From 9221e6abf50f3369225a72fceb5778536dbccc4f Mon Sep 17 00:00:00 2001 From: Astitva Aggarwal Date: Thu, 30 Mar 2023 15:09:24 +0530 Subject: [PATCH 15/53] added additional loss against data for NNODE(still needs reviewing) --- src/ode_solve.jl | 40 +++++----------------------------------- 1 file changed, 5 insertions(+), 35 deletions(-) diff --git a/src/ode_solve.jl b/src/ode_solve.jl index d5fafa096..babad7919 100644 --- a/src/ode_solve.jl +++ b/src/ode_solve.jl @@ -80,7 +80,6 @@ struct NNODE{C, O, P, B, K, AL <: Union{Nothing, Function}, autodiff::Bool batch::B strategy::S - kwargs::K additional_loss::AL kwargs::K end @@ -260,7 +259,7 @@ function generate_loss(strategy::QuadratureTraining, phi, f, autodiff::Bool, tsp sol = solve(intprob, QuadGKJL(); abstol = strategy.abstol, reltol = strategy.reltol) sol.u end - + return loss end @@ -292,38 +291,6 @@ function generate_loss(strategy::StochasticTraining, phi, f, autodiff::Bool, tsp sum(abs2, [inner_loss(phi, f, autodiff, t, θ, p) for t in ts]) end end - - return loss -end - -function generate_loss(strategy::WeightedIntervalTraining, phi, f, autodiff::Bool, tspan, p, - batch) - minT = tspan[1] - maxT = tspan[2] - - weights = strategy.weights ./ sum(strategy.weights) - - N = length(weights) - samples = strategy.samples - - difference = (maxT - minT) / N - - data = Float64[] - for (index, item) in 
enumerate(weights) - temp_data = rand(1, trunc(Int, samples * item)) .* difference .+ minT .+ - ((index - 1) * difference) - data = append!(data, temp_data) - end - - ts = data - - function loss(θ, _) - if batch - sum(abs2, inner_loss(phi, f, autodiff, ts, θ, p)) - else - sum(abs2, [inner_loss(phi, f, autodiff, t, θ, p) for t in ts]) - end - end return loss end @@ -455,9 +422,12 @@ function DiffEqBase.__solve(prob::DiffEqBase.AbstractODEProblem, # additional loss additional_loss = alg.additional_loss + # Computes total_loss function total_loss(θ, _) - L2_loss = generate_loss(strategy, phi, f, autodiff::Bool, tspan, p, batch)(θ, phi) + L2_loss = generate_loss(strategy, phi, f, autodiff::Bool, tspan, p, batch, + additional_loss) + if !(additional_loss isa Nothing) return additional_loss(phi, θ) + L2_loss end From 05afb08725453856abeb4824aaa3ea13dc94e79e Mon Sep 17 00:00:00 2001 From: Astitva Aggarwal Date: Sat, 1 Apr 2023 13:24:56 +0530 Subject: [PATCH 16/53] Added additional Loss function Feature for NNODE (user defined function), also now has common OptimizationFunction object definition(instead for each TrainingStrategy). --- src/ode_solve.jl | 32 +++++++++----------------------- 1 file changed, 9 insertions(+), 23 deletions(-) diff --git a/src/ode_solve.jl b/src/ode_solve.jl index babad7919..a44ef6cc7 100644 --- a/src/ode_solve.jl +++ b/src/ode_solve.jl @@ -259,11 +259,12 @@ function generate_loss(strategy::QuadratureTraining, phi, f, autodiff::Bool, tsp sol = solve(intprob, QuadGKJL(); abstol = strategy.abstol, reltol = strategy.reltol) sol.u end - + return loss end function generate_loss(strategy::GridTraining, phi, f, autodiff::Bool, tspan, p, batch) + ts = tspan[1]:(strategy.dx):tspan[2] # sum(abs2,inner_loss(t,θ) for t in ts) but Zygote generators are broken @@ -306,24 +307,14 @@ function generate_loss(strategy::WeightedIntervalTraining, phi, f, autodiff::Boo difference = (maxT - minT) / N - data = Float64[] - for (index, item) in enumerate(weights) - temp_data = rand(1, trunc(Int, samples * item)) .* difference .+ minT .+ - ((index - 1) * difference) - data = append!(data, temp_data) - end - - ts = data - - function loss(θ, _) - if batch - sum(abs2, inner_loss(phi, f, autodiff, ts, θ, p)) - else - sum(abs2, [inner_loss(phi, f, autodiff, t, θ, p) for t in ts]) - end + # total loss + total_loss = if additional_loss isa Nothing + loss + else + loss + additional_loss(phi, θ) end - return loss + optf = OptimizationFunction(total_loss, Optimization.AutoZygote()) end @@ -422,12 +413,9 @@ function DiffEqBase.__solve(prob::DiffEqBase.AbstractODEProblem, # additional loss additional_loss = alg.additional_loss - # Computes total_loss function total_loss(θ, _) - L2_loss = generate_loss(strategy, phi, f, autodiff::Bool, tspan, p, batch, - additional_loss) - + L2_loss = generate_loss(strategy, phi, f, autodiff::Bool, tspan, p, batch)(θ, phi) if !(additional_loss isa Nothing) return additional_loss(phi, θ) + L2_loss end @@ -439,8 +427,6 @@ function DiffEqBase.__solve(prob::DiffEqBase.AbstractODEProblem, Optimization.AutoForwardDiff() elseif strategy isa StochasticTraining Optimization.AutoZygote() - elseif strategy isa WeightedIntervalTraining - Optimization.AutoZygote() else # by default GridTraining choice of Optimization # if adding new training algorithms we can extend this, From 9c7f3f2d205fb1028ae010a5be762acd957e9901 Mon Sep 17 00:00:00 2001 From: Astitva Aggarwal Date: Sun, 2 Apr 2023 02:46:18 +0530 Subject: [PATCH 17/53] Added Tests for additional_loss function feature Also updated 
docs(might need further editing) Future Scope: We can add weighted loss in NNODE,and even losses for Parameter Estimation for Inverse Problems. --- docs/src/manual/ode.md | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/docs/src/manual/ode.md b/docs/src/manual/ode.md index 0cd20d406..b8fe44b92 100644 --- a/docs/src/manual/ode.md +++ b/docs/src/manual/ode.md @@ -2,4 +2,22 @@ ```@docs NNODE + +additional_loss as an argument for NNODE algorithm: +Any function which computes the additional_loss can be passed as an argument to NNODE algorithm call. + +example: +(u_, t_) = (u_analytical(sol.t), sol.t) +function additional_loss(phi, θ) + return sum(sum(abs2, [phi(t, θ) for t in t_] .- u_)) / length(u_) +end + +alg1 = NeuralPDE.NNODE(chain, opt, strategy=StochasticTraining(1000), additional_loss=additional_loss) + +Here we define the additional loss function additional_loss(phi, θ ), the function has two arguments: + +phi the trial solution +θ the parameters of neural networks + +Note:Refering to above example phi can only take in t as a scalar at a time and θ as parameters of the network. ``` From a96aef3c3ce09f830636d1238f2cf3adad4eda83 Mon Sep 17 00:00:00 2001 From: Astitva Aggarwal Date: Sun, 2 Apr 2023 14:04:53 +0530 Subject: [PATCH 18/53] docs --- docs/src/manual/ode.md | 18 ------------------ 1 file changed, 18 deletions(-) diff --git a/docs/src/manual/ode.md b/docs/src/manual/ode.md index b8fe44b92..0cd20d406 100644 --- a/docs/src/manual/ode.md +++ b/docs/src/manual/ode.md @@ -2,22 +2,4 @@ ```@docs NNODE - -additional_loss as an argument for NNODE algorithm: -Any function which computes the additional_loss can be passed as an argument to NNODE algorithm call. - -example: -(u_, t_) = (u_analytical(sol.t), sol.t) -function additional_loss(phi, θ) - return sum(sum(abs2, [phi(t, θ) for t in t_] .- u_)) / length(u_) -end - -alg1 = NeuralPDE.NNODE(chain, opt, strategy=StochasticTraining(1000), additional_loss=additional_loss) - -Here we define the additional loss function additional_loss(phi, θ ), the function has two arguments: - -phi the trial solution -θ the parameters of neural networks - -Note:Refering to above example phi can only take in t as a scalar at a time and θ as parameters of the network. 
``` From 2847912e4414c1c78a96556c4b61670e51ec57f2 Mon Sep 17 00:00:00 2001 From: Astitva Aggarwal Date: Sun, 2 Apr 2023 22:36:00 +0530 Subject: [PATCH 19/53] Mixed my PR with PR #635 by sdesai1287 --- src/ode_solve.jl | 23 ++++++++++++++++------- 1 file changed, 16 insertions(+), 7 deletions(-) diff --git a/src/ode_solve.jl b/src/ode_solve.jl index a44ef6cc7..05e109623 100644 --- a/src/ode_solve.jl +++ b/src/ode_solve.jl @@ -307,16 +307,25 @@ function generate_loss(strategy::WeightedIntervalTraining, phi, f, autodiff::Boo difference = (maxT - minT) / N - # total loss - total_loss = if additional_loss isa Nothing - loss - else - loss + additional_loss(phi, θ) + data = Float64[] + for (index, item) in enumerate(weights) + temp_data = rand(1, trunc(Int, samples * item)) .* difference .+ minT .+ + ((index - 1) * difference) + data = append!(data, temp_data) end - optf = OptimizationFunction(total_loss, Optimization.AutoZygote()) -end + ts = data + + function loss(θ, _) + if batch + sum(abs2, inner_loss(phi, f, autodiff, ts, θ, p)) + else + sum(abs2, [inner_loss(phi, f, autodiff, t, θ, p) for t in ts]) + end + end + return loss +end function generate_loss(strategy::QuasiRandomTraining, phi, f, autodiff::Bool, tspan) error("QuasiRandomTraining is not supported by NNODE since it's for high dimensional spaces only. Use StochasticTraining instead.") From b1733146acba98b56e7324342ece62e39593f549 Mon Sep 17 00:00:00 2001 From: Astitva Aggarwal Date: Sun, 2 Apr 2023 22:42:21 +0530 Subject: [PATCH 20/53] fixed optimizations choice --- src/ode_solve.jl | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/ode_solve.jl b/src/ode_solve.jl index 05e109623..39338d1ad 100644 --- a/src/ode_solve.jl +++ b/src/ode_solve.jl @@ -436,6 +436,8 @@ function DiffEqBase.__solve(prob::DiffEqBase.AbstractODEProblem, Optimization.AutoForwardDiff() elseif strategy isa StochasticTraining Optimization.AutoZygote() + elseif strategy isa WeightedIntervalTraining + Optimization.AutoZygote() else # by default GridTraining choice of Optimization # if adding new training algorithms we can extend this, From 1dcdc965d3cb996664d9fb3527db9ca0bb0f5540 Mon Sep 17 00:00:00 2001 From: Astitva Aggarwal Date: Thu, 30 Mar 2023 15:09:24 +0530 Subject: [PATCH 21/53] added additional loss against data for NNODE(still needs reviewing) --- src/ode_solve.jl | 97 +++++++++++++++++------------------------------- 1 file changed, 35 insertions(+), 62 deletions(-) diff --git a/src/ode_solve.jl b/src/ode_solve.jl index 39338d1ad..3a766cbaa 100644 --- a/src/ode_solve.jl +++ b/src/ode_solve.jl @@ -70,8 +70,7 @@ is an accurate interpolation (up to the neural network training result). In addi Lagaris, Isaac E., Aristidis Likas, and Dimitrios I. Fotiadis. "Artificial neural networks for solving ordinary and partial differential equations." IEEE Transactions on Neural Networks 9, no. 5 (1998): 987-1000. """ -struct NNODE{C, O, P, B, K, AL <: Union{Nothing, Function}, - S <: Union{Nothing, AbstractTrainingStrategy} +struct NNODE{C, O, P, B, K, AL, S <: Union{Nothing, AbstractTrainingStrategy} } <: NeuralPDEAlgorithm chain::C @@ -82,10 +81,13 @@ struct NNODE{C, O, P, B, K, AL <: Union{Nothing, Function}, strategy::S additional_loss::AL kwargs::K + additional_loss::AL end function NNODE(chain, opt, init_params = nothing; strategy = nothing, autodiff = false, batch = nothing, additional_loss = nothing, kwargs...) 
+ NNODE(chain, opt, init_params, autodiff, batch, strategy, additional_loss, kwargs) + autodiff = false, batch = nothing, additional_loss = nothing, kwargs...) NNODE(chain, opt, init_params, autodiff, batch, strategy, additional_loss, kwargs) end @@ -247,9 +249,10 @@ end """ Representation of the loss function, parametric on the training strategy `strategy` +Representation of the loss function, parametric on the training strategy `strategy` """ function generate_loss(strategy::QuadratureTraining, phi, f, autodiff::Bool, tspan, p, - batch) + batch, additional_loss = nothing) integrand(t::Number, θ) = abs2(inner_loss(phi, f, autodiff, t, θ, p)) integrand(ts, θ) = [abs2(inner_loss(phi, f, autodiff, t, θ, p)) for t in ts] @assert batch == 0 # not implemented @@ -260,11 +263,18 @@ function generate_loss(strategy::QuadratureTraining, phi, f, autodiff::Bool, tsp sol.u end - return loss + # total loss + total_loss = if additional_loss isa Nothing + loss + else + loss + additional_loss(phi, θ) + end + # Default this to ForwardDiff until Integrals.jl autodiff is sorted out + OptimizationFunction(total_loss, Optimization.AutoForwardDiff()) end -function generate_loss(strategy::GridTraining, phi, f, autodiff::Bool, tspan, p, batch) - +function generate_loss(strategy::GridTraining, phi, f, autodiff::Bool, tspan, p, batch, + additional_loss = nothing) ts = tspan[1]:(strategy.dx):tspan[2] # sum(abs2,inner_loss(t,θ) for t in ts) but Zygote generators are broken @@ -276,11 +286,18 @@ function generate_loss(strategy::GridTraining, phi, f, autodiff::Bool, tspan, p, end end - return loss + # total loss + total_loss = if additional_loss isa Nothing + loss + else + loss + additional_loss(phi, θ) + end + + optf = OptimizationFunction(total_loss, Optimization.AutoZygote()) end function generate_loss(strategy::StochasticTraining, phi, f, autodiff::Bool, tspan, p, - batch) + batch, additional_loss = nothing) # sum(abs2,inner_loss(t,θ) for t in ts) but Zygote generators are broken function loss(θ, _) ts = adapt(parameterless_type(θ), @@ -292,41 +309,18 @@ function generate_loss(strategy::StochasticTraining, phi, f, autodiff::Bool, tsp sum(abs2, [inner_loss(phi, f, autodiff, t, θ, p) for t in ts]) end end - return loss -end - -function generate_loss(strategy::WeightedIntervalTraining, phi, f, autodiff::Bool, tspan, p, - batch) - minT = tspan[1] - maxT = tspan[2] - weights = strategy.weights ./ sum(strategy.weights) - - N = length(weights) - samples = strategy.samples - - difference = (maxT - minT) / N - - data = Float64[] - for (index, item) in enumerate(weights) - temp_data = rand(1, trunc(Int, samples * item)) .* difference .+ minT .+ - ((index - 1) * difference) - data = append!(data, temp_data) - end - - ts = data - - function loss(θ, _) - if batch - sum(abs2, inner_loss(phi, f, autodiff, ts, θ, p)) - else - sum(abs2, [inner_loss(phi, f, autodiff, t, θ, p) for t in ts]) - end + # total loss + total_loss = if additional_loss isa Nothing + loss + else + loss + additional_loss(phi, θ) end - return loss + optf = OptimizationFunction(total_loss, Optimization.AutoZygote()) end + function generate_loss(strategy::QuasiRandomTraining, phi, f, autodiff::Bool, tspan) error("QuasiRandomTraining is not supported by NNODE since it's for high dimensional spaces only. 
Use StochasticTraining instead.") end @@ -422,31 +416,10 @@ function DiffEqBase.__solve(prob::DiffEqBase.AbstractODEProblem, # additional loss additional_loss = alg.additional_loss - # Computes total_loss - function total_loss(θ, _) - L2_loss = generate_loss(strategy, phi, f, autodiff::Bool, tspan, p, batch)(θ, phi) - if !(additional_loss isa Nothing) - return additional_loss(phi, θ) + L2_loss - end - L2_loss - end - - # Choice of Optimization Algo for Training Strategies - opt_algo = if strategy isa QuadratureTraining - Optimization.AutoForwardDiff() - elseif strategy isa StochasticTraining - Optimization.AutoZygote() - elseif strategy isa WeightedIntervalTraining - Optimization.AutoZygote() - else - # by default GridTraining choice of Optimization - # if adding new training algorithms we can extend this, - # if-elseif-else block for choices of optimization algos - Optimization.AutoZygote() - end + optf = generate_loss(strategy, phi, f, autodiff::Bool, tspan, p, batch, + additional_loss) - # Creates OptimizationFunction Object from total_loss - optf = OptimizationFunction(total_loss, opt_algo) + # optf = generate_loss(strategy, phi, f, autodiff::Bool, tspan, p, batch) iteration = 0 callback = function (p, l) From c0a5703f679e224af6e19aeead1bfae4514c7570 Mon Sep 17 00:00:00 2001 From: Astitva Aggarwal Date: Sat, 1 Apr 2023 13:24:56 +0530 Subject: [PATCH 22/53] Added additional Loss function Feature for NNODE (user defined function), also now has common OptimizationFunction object definition(instead for each TrainingStrategy). --- src/ode_solve.jl | 64 +++++++++++++++++++++++------------------------- 1 file changed, 30 insertions(+), 34 deletions(-) diff --git a/src/ode_solve.jl b/src/ode_solve.jl index 3a766cbaa..b191c8b78 100644 --- a/src/ode_solve.jl +++ b/src/ode_solve.jl @@ -70,8 +70,8 @@ is an accurate interpolation (up to the neural network training result). In addi Lagaris, Isaac E., Aristidis Likas, and Dimitrios I. Fotiadis. "Artificial neural networks for solving ordinary and partial differential equations." IEEE Transactions on Neural Networks 9, no. 5 (1998): 987-1000. 
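+
+Note that `phi` is called with a scalar time `t` and the network parameters `θ`,
+so user-defined `additional_loss` implementations should evaluate it pointwise,
+as in the example above.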
""" -struct NNODE{C, O, P, B, K, AL, S <: Union{Nothing, AbstractTrainingStrategy} - } <: +struct NNODE{C, O, P, B, K, S <: Union{Nothing, AbstractTrainingStrategy}, + AL <: Union{Nothing, Function}} <: NeuralPDEAlgorithm chain::C opt::O @@ -81,7 +81,6 @@ struct NNODE{C, O, P, B, K, AL, S <: Union{Nothing, AbstractTrainingStrategy} strategy::S additional_loss::AL kwargs::K - additional_loss::AL end function NNODE(chain, opt, init_params = nothing; strategy = nothing, @@ -252,7 +251,7 @@ Representation of the loss function, parametric on the training strategy `strate Representation of the loss function, parametric on the training strategy `strategy` """ function generate_loss(strategy::QuadratureTraining, phi, f, autodiff::Bool, tspan, p, - batch, additional_loss = nothing) + batch) integrand(t::Number, θ) = abs2(inner_loss(phi, f, autodiff, t, θ, p)) integrand(ts, θ) = [abs2(inner_loss(phi, f, autodiff, t, θ, p)) for t in ts] @assert batch == 0 # not implemented @@ -263,18 +262,10 @@ function generate_loss(strategy::QuadratureTraining, phi, f, autodiff::Bool, tsp sol.u end - # total loss - total_loss = if additional_loss isa Nothing - loss - else - loss + additional_loss(phi, θ) - end - # Default this to ForwardDiff until Integrals.jl autodiff is sorted out - OptimizationFunction(total_loss, Optimization.AutoForwardDiff()) + return loss end -function generate_loss(strategy::GridTraining, phi, f, autodiff::Bool, tspan, p, batch, - additional_loss = nothing) +function generate_loss(strategy::GridTraining, phi, f, autodiff::Bool, tspan, p, batch) ts = tspan[1]:(strategy.dx):tspan[2] # sum(abs2,inner_loss(t,θ) for t in ts) but Zygote generators are broken @@ -286,18 +277,11 @@ function generate_loss(strategy::GridTraining, phi, f, autodiff::Bool, tspan, p, end end - # total loss - total_loss = if additional_loss isa Nothing - loss - else - loss + additional_loss(phi, θ) - end - - optf = OptimizationFunction(total_loss, Optimization.AutoZygote()) + return loss end function generate_loss(strategy::StochasticTraining, phi, f, autodiff::Bool, tspan, p, - batch, additional_loss = nothing) + batch) # sum(abs2,inner_loss(t,θ) for t in ts) but Zygote generators are broken function loss(θ, _) ts = adapt(parameterless_type(θ), @@ -310,14 +294,7 @@ function generate_loss(strategy::StochasticTraining, phi, f, autodiff::Bool, tsp end end - # total loss - total_loss = if additional_loss isa Nothing - loss - else - loss + additional_loss(phi, θ) - end - - optf = OptimizationFunction(total_loss, Optimization.AutoZygote()) + return loss end @@ -416,10 +393,29 @@ function DiffEqBase.__solve(prob::DiffEqBase.AbstractODEProblem, # additional loss additional_loss = alg.additional_loss - optf = generate_loss(strategy, phi, f, autodiff::Bool, tspan, p, batch, - additional_loss) + # Computes total_loss + function total_loss(θ, _) + L2_loss = generate_loss(strategy, phi, f, autodiff::Bool, tspan, p, batch)(θ, phi) + if !(additional_loss isa Nothing) + return additional_loss(phi, θ) + L2_loss + end + L2_loss + end + + # Choice of Optimization Algo for Training Strategies + opt_algo = if strategy isa QuadratureTraining + Optimization.AutoForwardDiff() + elseif strategy isa StochasticTraining + Optimization.AutoZygote() + else + # by default GridTraining choice of Optimization + # if adding new training algorithms we can extend this, + # if-elseif-else block for choices of optimization algos + Optimization.AutoZygote() + end - # optf = generate_loss(strategy, phi, f, autodiff::Bool, tspan, p, batch) + # Creates 
OptimizationFunction Object from total_loss + optf = OptimizationFunction(total_loss, opt_algo) iteration = 0 callback = function (p, l) From df085eead9470e0783e150aa27d240c24ca0c7d6 Mon Sep 17 00:00:00 2001 From: Astitva Aggarwal Date: Sun, 2 Apr 2023 02:46:18 +0530 Subject: [PATCH 23/53] Added Tests for additional_loss function feature Also updated docs(might need further editing) Future Scope: We can add weighted loss in NNODE,and even losses for Parameter Estimation for Inverse Problems. --- docs/src/manual/ode.md | 18 +++++ test/NNODE_tests.jl | 156 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 174 insertions(+) diff --git a/docs/src/manual/ode.md b/docs/src/manual/ode.md index 0cd20d406..b8fe44b92 100644 --- a/docs/src/manual/ode.md +++ b/docs/src/manual/ode.md @@ -2,4 +2,22 @@ ```@docs NNODE + +additional_loss as an argument for NNODE algorithm: +Any function which computes the additional_loss can be passed as an argument to NNODE algorithm call. + +example: +(u_, t_) = (u_analytical(sol.t), sol.t) +function additional_loss(phi, θ) + return sum(sum(abs2, [phi(t, θ) for t in t_] .- u_)) / length(u_) +end + +alg1 = NeuralPDE.NNODE(chain, opt, strategy=StochasticTraining(1000), additional_loss=additional_loss) + +Here we define the additional loss function additional_loss(phi, θ ), the function has two arguments: + +phi the trial solution +θ the parameters of neural networks + +Note:Refering to above example phi can only take in t as a scalar at a time and θ as parameters of the network. ``` diff --git a/test/NNODE_tests.jl b/test/NNODE_tests.jl index 1839f57c7..d2dbf79dd 100644 --- a/test/NNODE_tests.jl +++ b/test/NNODE_tests.jl @@ -207,6 +207,68 @@ sol = solve(prob, NeuralPDE.NNODE(luxchain, opt; batch = true), verbose = true, abstol = 1.0f-8, dt = 1 / 5.0f0) @test sol.errors[:l2] < 0.5 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + # WeightedIntervalTraining(Lux Chain) function f(u, p, t) [p[1] * u[1] - p[2] * u[1] * u[2], -p[3] * u[2] + p[4] * u[1] * u[2]] @@ -323,3 +385,97 @@ alg1 = NeuralPDE.NNODE(luxchain, opt, strategy = StochasticTraining(1000), sol1 = solve(prob, alg1, verbose = true, abstol = 1.0f-8, maxiters = 500) @test sol1.errors[:l2] < 0.5 + +# Checking if additional_loss feature works for NNODE +linear = (u, p, t) -> cos(2pi * t) +linear_analytic = (u, p, t) -> (1 / (2pi)) * sin(2pi * t) +tspan = (0.0f0, 1.0f0) +dt = (tspan[2] - tspan[1]) / 99 +ts = collect(tspan[1]:dt:tspan[2]) +prob = ODEProblem(ODEFunction(linear, analytic = linear_analytic), 0.0f0, (0.0f0, 1.0f0)) +opt = OptimizationOptimisers.Adam(0.1, (0.9, 0.95)) + +# Analytical solution +u_analytical(x) = (1 / (2pi)) .* sin.(2pi .* x) + +# GridTraining (Flux Chain) +chain = Flux.Chain(Dense(1, 5, σ), Dense(5, 1)) + +(u_, t_) = (u_analytical(ts), ts) +function additional_loss(phi, θ) + return sum(sum(abs2, [phi(t, θ) for t in t_] .- u_)) / length(u_) +end + +alg1 = NeuralPDE.NNODE(chain, opt, strategy = GridTraining(0.01), + additional_loss = additional_loss) + +sol1 = solve(prob, alg1, verbose = true, abstol = 1.0f-8, maxiters = 500) +@test sol1.errors[:l2] < 0.5 + +# GridTraining (Lux Chain) +luxchain = Lux.Chain(Lux.Dense(1, 5, Lux.σ), Lux.Dense(5, 1)) + +(u_, t_) = (u_analytical(ts), ts) +function additional_loss(phi, θ) + return sum(sum(abs2, [phi(t, θ) for t in t_] .- u_)) / length(u_) +end + +alg1 = NeuralPDE.NNODE(luxchain, opt, strategy = GridTraining(0.01), + additional_loss = additional_loss) + +sol1 = 
solve(prob, alg1, verbose = true, abstol = 1.0f-8, maxiters = 500) +@test sol1.errors[:l2] < 0.5 + +# QuadratureTraining (Flux Chain) +chain = Flux.Chain(Dense(1, 5, σ), Dense(5, 1)) + +(u_, t_) = (u_analytical(ts), ts) +function additional_loss(phi, θ) + return sum(sum(abs2, [phi(t, θ) for t in t_] .- u_)) / length(u_) +end + +alg1 = NeuralPDE.NNODE(chain, opt, additional_loss = additional_loss) + +sol1 = solve(prob, alg1, verbose = true, abstol = 1.0f-10, maxiters = 200) +@test sol1.errors[:l2] < 0.5 + +# QuadratureTraining (Lux Chain) +luxchain = Lux.Chain(Lux.Dense(1, 5, Lux.σ), Lux.Dense(5, 1)) + +(u_, t_) = (u_analytical(ts), ts) +function additional_loss(phi, θ) + return sum(sum(abs2, [phi(t, θ) for t in t_] .- u_)) / length(u_) +end + +alg1 = NeuralPDE.NNODE(luxchain, opt, additional_loss = additional_loss) + +sol1 = solve(prob, alg1, verbose = true, abstol = 1.0f-10, maxiters = 200) +@test sol1.errors[:l2] < 0.5 + +# StochasticTraining(Flux Chain) +chain = Flux.Chain(Dense(1, 5, σ), Dense(5, 1)) + +(u_, t_) = (u_analytical(ts), ts) +function additional_loss(phi, θ) + return sum(sum(abs2, [phi(t, θ) for t in t_] .- u_)) / length(u_) +end + +alg1 = NeuralPDE.NNODE(chain, opt, strategy = StochasticTraining(1000), + additional_loss = additional_loss) + +sol1 = solve(prob, alg1, verbose = true, abstol = 1.0f-8, maxiters = 500) +@test sol1.errors[:l2] < 0.5 + +# StochasticTraining (Lux Chain) +luxchain = Lux.Chain(Lux.Dense(1, 5, Lux.σ), Lux.Dense(5, 1)) + +(u_, t_) = (u_analytical(ts), ts) +function additional_loss(phi, θ) + return sum(sum(abs2, [phi(t, θ) for t in t_] .- u_)) / length(u_) +end + +alg1 = NeuralPDE.NNODE(luxchain, opt, strategy = StochasticTraining(1000), + additional_loss = additional_loss) + +sol1 = solve(prob, alg1, verbose = true, abstol = 1.0f-8, maxiters = 500) +@test sol1.errors[:l2] < 0.5 \ No newline at end of file From 2352c2353fe936efa239e77830d2f9d6eaa67e15 Mon Sep 17 00:00:00 2001 From: Astitva Aggarwal Date: Sun, 2 Apr 2023 03:34:08 +0530 Subject: [PATCH 24/53] formatted files --- test/NNODE_tests.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/NNODE_tests.jl b/test/NNODE_tests.jl index d2dbf79dd..233623050 100644 --- a/test/NNODE_tests.jl +++ b/test/NNODE_tests.jl @@ -478,4 +478,4 @@ alg1 = NeuralPDE.NNODE(luxchain, opt, strategy = StochasticTraining(1000), additional_loss = additional_loss) sol1 = solve(prob, alg1, verbose = true, abstol = 1.0f-8, maxiters = 500) -@test sol1.errors[:l2] < 0.5 \ No newline at end of file +@test sol1.errors[:l2] < 0.5 From e544d1f31b234ca686a4254f410ff008af9df354 Mon Sep 17 00:00:00 2001 From: Astitva Aggarwal Date: Sun, 2 Apr 2023 14:04:53 +0530 Subject: [PATCH 25/53] docs --- docs/src/manual/ode.md | 18 ------------------ 1 file changed, 18 deletions(-) diff --git a/docs/src/manual/ode.md b/docs/src/manual/ode.md index b8fe44b92..0cd20d406 100644 --- a/docs/src/manual/ode.md +++ b/docs/src/manual/ode.md @@ -2,22 +2,4 @@ ```@docs NNODE - -additional_loss as an argument for NNODE algorithm: -Any function which computes the additional_loss can be passed as an argument to NNODE algorithm call. 
- -example: -(u_, t_) = (u_analytical(sol.t), sol.t) -function additional_loss(phi, θ) - return sum(sum(abs2, [phi(t, θ) for t in t_] .- u_)) / length(u_) -end - -alg1 = NeuralPDE.NNODE(chain, opt, strategy=StochasticTraining(1000), additional_loss=additional_loss) - -Here we define the additional loss function additional_loss(phi, θ ), the function has two arguments: - -phi the trial solution -θ the parameters of neural networks - -Note:Refering to above example phi can only take in t as a scalar at a time and θ as parameters of the network. ``` From 3e43a16d99c50333994b3b1f1b703200f677e8ff Mon Sep 17 00:00:00 2001 From: Astitva Aggarwal Date: Sun, 2 Apr 2023 22:36:00 +0530 Subject: [PATCH 26/53] Mixed my PR with PR #635 by sdesai1287 --- src/ode_solve.jl | 5 +- test/NNODE_tests.jl | 129 +------------------------------------------- 2 files changed, 3 insertions(+), 131 deletions(-) diff --git a/src/ode_solve.jl b/src/ode_solve.jl index b191c8b78..2c6a6f1c3 100644 --- a/src/ode_solve.jl +++ b/src/ode_solve.jl @@ -248,7 +248,6 @@ end """ Representation of the loss function, parametric on the training strategy `strategy` -Representation of the loss function, parametric on the training strategy `strategy` """ function generate_loss(strategy::QuadratureTraining, phi, f, autodiff::Bool, tspan, p, batch) @@ -286,7 +285,6 @@ function generate_loss(strategy::StochasticTraining, phi, f, autodiff::Bool, tsp function loss(θ, _) ts = adapt(parameterless_type(θ), [(tspan[2] - tspan[1]) * rand() + tspan[1] for i in 1:(strategy.points)]) - if batch sum(abs2, inner_loss(phi, f, autodiff, ts, θ, p)) else @@ -298,7 +296,8 @@ function generate_loss(strategy::StochasticTraining, phi, f, autodiff::Bool, tsp end -function generate_loss(strategy::QuasiRandomTraining, phi, f, autodiff::Bool, tspan) +function generate_loss(strategy::QuasiRandomTraining, phi, f, autodiff::Bool, tspan, + additional_loss) error("QuasiRandomTraining is not supported by NNODE since it's for high dimensional spaces only. 
Use StochasticTraining instead.") end diff --git a/test/NNODE_tests.jl b/test/NNODE_tests.jl index 233623050..f7443d1ed 100644 --- a/test/NNODE_tests.jl +++ b/test/NNODE_tests.jl @@ -1,6 +1,7 @@ using Test, Flux using Random, NeuralPDE using OrdinaryDiffEq, Optimisers, Statistics +using OrdinaryDiffEq, Optimisers, Statistics import Lux, OptimizationOptimisers, OptimizationOptimJL Random.seed!(100) @@ -257,134 +258,6 @@ sol = solve(prob, NeuralPDE.NNODE(luxchain, opt; batch = true), verbose = true, - - - - - - - - - - - - -# WeightedIntervalTraining(Lux Chain) -function f(u, p, t) - [p[1] * u[1] - p[2] * u[1] * u[2], -p[3] * u[2] + p[4] * u[1] * u[2]] -end - -p = [1.5, 1.0, 3.0, 1.0] -u0 = [1.0, 1.0] -prob_oop = ODEProblem{false}(f, u0, (0.0, 3.0), p) -true_sol = solve(prob_oop, Tsit5(), saveat = 0.01) -func = Lux.σ -N = 12 -chain = Lux.Chain(Lux.Dense(1, N, func), Lux.Dense(N, N, func), Lux.Dense(N, N, func), - Lux.Dense(N, N, func), Lux.Dense(N, length(u0))) - -opt = Optimisers.Adam(0.01) -weights = [0.7, 0.2, 0.1] -samples = 200 -alg = NeuralPDE.NNODE(chain, opt, autodiff = false, - strategy = NeuralPDE.WeightedIntervalTraining(weights, samples)) -sol = solve(prob_oop, alg, verbose = true, maxiters = 100000, saveat = 0.01) - -@test abs(mean(sol) - mean(true_sol)) < 0.2 - -# Checking if additional_loss feature works for NNODE -linear = (u, p, t) -> cos(2pi * t) -linear_analytic = (u, p, t) -> (1 / (2pi)) * sin(2pi * t) -tspan = (0.0f0, 1.0f0) -dt = (tspan[2] - tspan[1]) / 99 -ts = collect(tspan[1]:dt:tspan[2]) -prob = ODEProblem(ODEFunction(linear, analytic = linear_analytic), 0.0f0, (0.0f0, 1.0f0)) -opt = OptimizationOptimisers.Adam(0.1, (0.9, 0.95)) - -# Analytical solution -u_analytical(x) = (1 / (2pi)) .* sin.(2pi .* x) - -# GridTraining (Flux Chain) -chain = Flux.Chain(Dense(1, 5, σ), Dense(5, 1)) - -(u_, t_) = (u_analytical(ts), ts) -function additional_loss(phi, θ) - return sum(sum(abs2, [phi(t, θ) for t in t_] .- u_)) / length(u_) -end - -alg1 = NeuralPDE.NNODE(chain, opt, strategy = GridTraining(0.01), - additional_loss = additional_loss) - -sol1 = solve(prob, alg1, verbose = true, abstol = 1.0f-8, maxiters = 500) -@test sol1.errors[:l2] < 0.5 - -# GridTraining (Lux Chain) -luxchain = Lux.Chain(Lux.Dense(1, 5, Lux.σ), Lux.Dense(5, 1)) - -(u_, t_) = (u_analytical(ts), ts) -function additional_loss(phi, θ) - return sum(sum(abs2, [phi(t, θ) for t in t_] .- u_)) / length(u_) -end - -alg1 = NeuralPDE.NNODE(luxchain, opt, strategy = GridTraining(0.01), - additional_loss = additional_loss) - -sol1 = solve(prob, alg1, verbose = true, abstol = 1.0f-8, maxiters = 500) -@test sol1.errors[:l2] < 0.5 - -# QuadratureTraining (Flux Chain) -chain = Flux.Chain(Dense(1, 5, σ), Dense(5, 1)) - -(u_, t_) = (u_analytical(ts), ts) -function additional_loss(phi, θ) - return sum(sum(abs2, [phi(t, θ) for t in t_] .- u_)) / length(u_) -end - -alg1 = NeuralPDE.NNODE(chain, opt, additional_loss = additional_loss) - -sol1 = solve(prob, alg1, verbose = true, abstol = 1.0f-10, maxiters = 200) -@test sol1.errors[:l2] < 0.5 - -# QuadratureTraining (Lux Chain) -luxchain = Lux.Chain(Lux.Dense(1, 5, Lux.σ), Lux.Dense(5, 1)) - -(u_, t_) = (u_analytical(ts), ts) -function additional_loss(phi, θ) - return sum(sum(abs2, [phi(t, θ) for t in t_] .- u_)) / length(u_) -end - -alg1 = NeuralPDE.NNODE(luxchain, opt, additional_loss = additional_loss) - -sol1 = solve(prob, alg1, verbose = true, abstol = 1.0f-10, maxiters = 200) -@test sol1.errors[:l2] < 0.5 - -# StochasticTraining(Flux Chain) -chain = Flux.Chain(Dense(1, 5, σ), 
Dense(5, 1)) - -(u_, t_) = (u_analytical(ts), ts) -function additional_loss(phi, θ) - return sum(sum(abs2, [phi(t, θ) for t in t_] .- u_)) / length(u_) -end - -alg1 = NeuralPDE.NNODE(chain, opt, strategy = StochasticTraining(1000), - additional_loss = additional_loss) - -sol1 = solve(prob, alg1, verbose = true, abstol = 1.0f-8, maxiters = 500) -@test sol1.errors[:l2] < 0.5 - -# StochasticTraining (Lux Chain) -luxchain = Lux.Chain(Lux.Dense(1, 5, Lux.σ), Lux.Dense(5, 1)) - -(u_, t_) = (u_analytical(ts), ts) -function additional_loss(phi, θ) - return sum(sum(abs2, [phi(t, θ) for t in t_] .- u_)) / length(u_) -end - -alg1 = NeuralPDE.NNODE(luxchain, opt, strategy = StochasticTraining(1000), - additional_loss = additional_loss) - -sol1 = solve(prob, alg1, verbose = true, abstol = 1.0f-8, maxiters = 500) -@test sol1.errors[:l2] < 0.5 # Checking if additional_loss feature works for NNODE linear = (u, p, t) -> cos(2pi * t) From 79dd8fe2fd4087d80e4f190496d462c207bd340f Mon Sep 17 00:00:00 2001 From: Astitva Aggarwal Date: Sun, 2 Apr 2023 22:42:21 +0530 Subject: [PATCH 27/53] fixed optimizations choice --- src/ode_solve.jl | 53 ++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 51 insertions(+), 2 deletions(-) diff --git a/src/ode_solve.jl b/src/ode_solve.jl index 2c6a6f1c3..69e2ec209 100644 --- a/src/ode_solve.jl +++ b/src/ode_solve.jl @@ -85,8 +85,6 @@ end function NNODE(chain, opt, init_params = nothing; strategy = nothing, autodiff = false, batch = nothing, additional_loss = nothing, kwargs...) - NNODE(chain, opt, init_params, autodiff, batch, strategy, additional_loss, kwargs) - autodiff = false, batch = nothing, additional_loss = nothing, kwargs...) NNODE(chain, opt, init_params, autodiff, batch, strategy, additional_loss, kwargs) end @@ -296,6 +294,55 @@ function generate_loss(strategy::StochasticTraining, phi, f, autodiff::Bool, tsp end + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + function generate_loss(strategy::QuasiRandomTraining, phi, f, autodiff::Bool, tspan, additional_loss) error("QuasiRandomTraining is not supported by NNODE since it's for high dimensional spaces only. 
Use StochasticTraining instead.") @@ -406,6 +453,8 @@ function DiffEqBase.__solve(prob::DiffEqBase.AbstractODEProblem, Optimization.AutoForwardDiff() elseif strategy isa StochasticTraining Optimization.AutoZygote() + elseif strategy isa WeightedIntervalTraining + Optimization.AutoZygote() else # by default GridTraining choice of Optimization # if adding new training algorithms we can extend this, From e28a3f21073e345d7fd475e6c49367eb3a270a94 Mon Sep 17 00:00:00 2001 From: Astitva Aggarwal Date: Mon, 3 Apr 2023 20:18:48 +0530 Subject: [PATCH 28/53] rebased --- test/NNODE_tests.jl | 51 +++------------------------------------------ 1 file changed, 3 insertions(+), 48 deletions(-) diff --git a/test/NNODE_tests.jl b/test/NNODE_tests.jl index f7443d1ed..d4dd5b0dd 100644 --- a/test/NNODE_tests.jl +++ b/test/NNODE_tests.jl @@ -207,54 +207,9 @@ sol = solve(prob, NeuralPDE.NNODE(luxchain, opt; batch = true), verbose = true, maxiters = 400, abstol = 1.0f-8, dt = 1 / 5.0f0) @test sol.errors[:l2] < 0.5 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +function f(u, p, t) + [p[1] * u[1] - p[2] * u[1] * u[2], -p[3] * u[2] + p[4] * u[1] * u[2]] +end From d8e06ccbe378e50ca143e40c1498a221a5ed15de Mon Sep 17 00:00:00 2001 From: Astitva Aggarwal Date: Tue, 4 Apr 2023 02:05:47 +0530 Subject: [PATCH 29/53] Actually performed Rebase and Formatted some files --- src/ode_solve.jl | 51 +----------------------------------------------- 1 file changed, 1 insertion(+), 50 deletions(-) diff --git a/src/ode_solve.jl b/src/ode_solve.jl index 69e2ec209..8ac00fb7e 100644 --- a/src/ode_solve.jl +++ b/src/ode_solve.jl @@ -295,56 +295,7 @@ end - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -function generate_loss(strategy::QuasiRandomTraining, phi, f, autodiff::Bool, tspan, - additional_loss) +function generate_loss(strategy::QuasiRandomTraining, phi, f, autodiff::Bool, tspan) error("QuasiRandomTraining is not supported by NNODE since it's for high dimensional spaces only. Use StochasticTraining instead.") end From 2d2018177756dc3cf70d57c1c984cee49c2a6c68 Mon Sep 17 00:00:00 2001 From: Astitva Aggarwal Date: Tue, 4 Apr 2023 13:38:42 +0530 Subject: [PATCH 30/53] fixed ode_solve.jl rebase issues --- src/ode_solve.jl | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/ode_solve.jl b/src/ode_solve.jl index 8ac00fb7e..78cbcfc09 100644 --- a/src/ode_solve.jl +++ b/src/ode_solve.jl @@ -70,8 +70,9 @@ is an accurate interpolation (up to the neural network training result). In addi Lagaris, Isaac E., Aristidis Likas, and Dimitrios I. Fotiadis. "Artificial neural networks for solving ordinary and partial differential equations." IEEE Transactions on Neural Networks 9, no. 5 (1998): 987-1000. 
""" -struct NNODE{C, O, P, B, K, S <: Union{Nothing, AbstractTrainingStrategy}, - AL <: Union{Nothing, Function}} <: +struct NNODE{C, O, P, B, K, AL <: Union{Nothing, Function}, + S <: Union{Nothing, AbstractTrainingStrategy} + } <: NeuralPDEAlgorithm chain::C opt::O From c25a23362b3cab48705f1acefd4449d16435ead1 Mon Sep 17 00:00:00 2001 From: Astitva Aggarwal Date: Thu, 30 Mar 2023 15:09:24 +0530 Subject: [PATCH 31/53] added additional loss against data for NNODE(still needs reviewing) --- src/ode_solve.jl | 27 ++++++++++++++++----------- 1 file changed, 16 insertions(+), 11 deletions(-) diff --git a/src/ode_solve.jl b/src/ode_solve.jl index 78cbcfc09..39bb40dca 100644 --- a/src/ode_solve.jl +++ b/src/ode_solve.jl @@ -70,9 +70,7 @@ is an accurate interpolation (up to the neural network training result). In addi Lagaris, Isaac E., Aristidis Likas, and Dimitrios I. Fotiadis. "Artificial neural networks for solving ordinary and partial differential equations." IEEE Transactions on Neural Networks 9, no. 5 (1998): 987-1000. """ -struct NNODE{C, O, P, B, K, AL <: Union{Nothing, Function}, - S <: Union{Nothing, AbstractTrainingStrategy} - } <: +struct NNODE{C, O, P, B, K, S <: Union{Nothing, AbstractTrainingStrategy}} <: NeuralPDEAlgorithm chain::C opt::O @@ -85,8 +83,8 @@ struct NNODE{C, O, P, B, K, AL <: Union{Nothing, Function}, end function NNODE(chain, opt, init_params = nothing; strategy = nothing, - autodiff = false, batch = nothing, additional_loss = nothing, kwargs...) - NNODE(chain, opt, init_params, autodiff, batch, strategy, additional_loss, kwargs) + autodiff = false, batch = nothing, kwargs...) + NNODE(chain, opt, init_params, autodiff, batch, strategy, kwargs) end """ @@ -249,7 +247,7 @@ end Representation of the loss function, parametric on the training strategy `strategy` """ function generate_loss(strategy::QuadratureTraining, phi, f, autodiff::Bool, tspan, p, - batch) + batch, additional_loss = nothing) integrand(t::Number, θ) = abs2(inner_loss(phi, f, autodiff, t, θ, p)) integrand(ts, θ) = [abs2(inner_loss(phi, f, autodiff, t, θ, p)) for t in ts] @assert batch == 0 # not implemented @@ -260,10 +258,12 @@ function generate_loss(strategy::QuadratureTraining, phi, f, autodiff::Bool, tsp sol.u end - return loss + # Default this to ForwardDiff until Integrals.jl autodiff is sorted out + OptimizationFunction(loss, Optimization.AutoForwardDiff()) end -function generate_loss(strategy::GridTraining, phi, f, autodiff::Bool, tspan, p, batch) +function generate_loss(strategy::GridTraining, phi, f, autodiff::Bool, tspan, p, batch, + additional_loss = nothing) ts = tspan[1]:(strategy.dx):tspan[2] # sum(abs2,inner_loss(t,θ) for t in ts) but Zygote generators are broken @@ -279,7 +279,7 @@ function generate_loss(strategy::GridTraining, phi, f, autodiff::Bool, tspan, p, end function generate_loss(strategy::StochasticTraining, phi, f, autodiff::Bool, tspan, p, - batch) + batch, additional_loss = nothing) # sum(abs2,inner_loss(t,θ) for t in ts) but Zygote generators are broken function loss(θ, _) ts = adapt(parameterless_type(θ), @@ -294,8 +294,6 @@ function generate_loss(strategy::StochasticTraining, phi, f, autodiff::Bool, tsp return loss end - - function generate_loss(strategy::QuasiRandomTraining, phi, f, autodiff::Bool, tspan) error("QuasiRandomTraining is not supported by NNODE since it's for high dimensional spaces only. 
Use StochasticTraining instead.") end @@ -391,6 +389,13 @@ function DiffEqBase.__solve(prob::DiffEqBase.AbstractODEProblem, # additional loss additional_loss = alg.additional_loss + optf = generate_loss(strategy, phi, f, autodiff::Bool, tspan, p, batch, + additional_loss) + + # optf = generate_loss(strategy, phi, f, autodiff::Bool, tspan, p, batch) + # additional loss + additional_loss = alg.additional_loss + # Computes total_loss function total_loss(θ, _) L2_loss = generate_loss(strategy, phi, f, autodiff::Bool, tspan, p, batch)(θ, phi) From a5e200b3e1cd1f19ebf0be056af6c330a7f3d2c0 Mon Sep 17 00:00:00 2001 From: Astitva Aggarwal Date: Sat, 1 Apr 2023 13:24:56 +0530 Subject: [PATCH 32/53] Added additional Loss function Feature for NNODE (user defined function), also now has common OptimizationFunction object definition(instead for each TrainingStrategy). --- src/ode_solve.jl | 25 ++++++++----------------- 1 file changed, 8 insertions(+), 17 deletions(-) diff --git a/src/ode_solve.jl b/src/ode_solve.jl index 39bb40dca..968de617e 100644 --- a/src/ode_solve.jl +++ b/src/ode_solve.jl @@ -70,7 +70,8 @@ is an accurate interpolation (up to the neural network training result). In addi Lagaris, Isaac E., Aristidis Likas, and Dimitrios I. Fotiadis. "Artificial neural networks for solving ordinary and partial differential equations." IEEE Transactions on Neural Networks 9, no. 5 (1998): 987-1000. """ -struct NNODE{C, O, P, B, K, S <: Union{Nothing, AbstractTrainingStrategy}} <: +struct NNODE{C, O, P, B, K, S <: Union{Nothing, AbstractTrainingStrategy}, + AL <: Union{Nothing, Function}} <: NeuralPDEAlgorithm chain::C opt::O @@ -247,7 +248,7 @@ end Representation of the loss function, parametric on the training strategy `strategy` """ function generate_loss(strategy::QuadratureTraining, phi, f, autodiff::Bool, tspan, p, - batch, additional_loss = nothing) + batch) integrand(t::Number, θ) = abs2(inner_loss(phi, f, autodiff, t, θ, p)) integrand(ts, θ) = [abs2(inner_loss(phi, f, autodiff, t, θ, p)) for t in ts] @assert batch == 0 # not implemented @@ -258,12 +259,10 @@ function generate_loss(strategy::QuadratureTraining, phi, f, autodiff::Bool, tsp sol.u end - # Default this to ForwardDiff until Integrals.jl autodiff is sorted out - OptimizationFunction(loss, Optimization.AutoForwardDiff()) + return loss end -function generate_loss(strategy::GridTraining, phi, f, autodiff::Bool, tspan, p, batch, - additional_loss = nothing) +function generate_loss(strategy::GridTraining, phi, f, autodiff::Bool, tspan, p, batch) ts = tspan[1]:(strategy.dx):tspan[2] # sum(abs2,inner_loss(t,θ) for t in ts) but Zygote generators are broken @@ -279,7 +278,7 @@ function generate_loss(strategy::GridTraining, phi, f, autodiff::Bool, tspan, p, end function generate_loss(strategy::StochasticTraining, phi, f, autodiff::Bool, tspan, p, - batch, additional_loss = nothing) + batch) # sum(abs2,inner_loss(t,θ) for t in ts) but Zygote generators are broken function loss(θ, _) ts = adapt(parameterless_type(θ), @@ -294,7 +293,8 @@ function generate_loss(strategy::StochasticTraining, phi, f, autodiff::Bool, tsp return loss end -function generate_loss(strategy::QuasiRandomTraining, phi, f, autodiff::Bool, tspan) +function generate_loss(strategy::QuasiRandomTraining, phi, f, autodiff::Bool, tspan, + additional_loss) error("QuasiRandomTraining is not supported by NNODE since it's for high dimensional spaces only. 
Use StochasticTraining instead.") end @@ -389,13 +389,6 @@ function DiffEqBase.__solve(prob::DiffEqBase.AbstractODEProblem, # additional loss additional_loss = alg.additional_loss - optf = generate_loss(strategy, phi, f, autodiff::Bool, tspan, p, batch, - additional_loss) - - # optf = generate_loss(strategy, phi, f, autodiff::Bool, tspan, p, batch) - # additional loss - additional_loss = alg.additional_loss - # Computes total_loss function total_loss(θ, _) L2_loss = generate_loss(strategy, phi, f, autodiff::Bool, tspan, p, batch)(θ, phi) @@ -410,8 +403,6 @@ function DiffEqBase.__solve(prob::DiffEqBase.AbstractODEProblem, Optimization.AutoForwardDiff() elseif strategy isa StochasticTraining Optimization.AutoZygote() - elseif strategy isa WeightedIntervalTraining - Optimization.AutoZygote() else # by default GridTraining choice of Optimization # if adding new training algorithms we can extend this, From 568f934f03ff6ab55dca9bed609606217e81f0c8 Mon Sep 17 00:00:00 2001 From: Astitva Aggarwal Date: Sun, 2 Apr 2023 02:46:18 +0530 Subject: [PATCH 33/53] Added Tests for additional_loss function feature Also updated docs(might need further editing) Future Scope: We can add weighted loss in NNODE,and even losses for Parameter Estimation for Inverse Problems. --- docs/src/manual/ode.md | 18 ++++++++ test/NNODE_tests.jl | 100 ----------------------------------------- 2 files changed, 18 insertions(+), 100 deletions(-) diff --git a/docs/src/manual/ode.md b/docs/src/manual/ode.md index 0cd20d406..b8fe44b92 100644 --- a/docs/src/manual/ode.md +++ b/docs/src/manual/ode.md @@ -2,4 +2,22 @@ ```@docs NNODE + +additional_loss as an argument for NNODE algorithm: +Any function which computes the additional_loss can be passed as an argument to NNODE algorithm call. + +example: +(u_, t_) = (u_analytical(sol.t), sol.t) +function additional_loss(phi, θ) + return sum(sum(abs2, [phi(t, θ) for t in t_] .- u_)) / length(u_) +end + +alg1 = NeuralPDE.NNODE(chain, opt, strategy=StochasticTraining(1000), additional_loss=additional_loss) + +Here we define the additional loss function additional_loss(phi, θ ), the function has two arguments: + +phi the trial solution +θ the parameters of neural networks + +Note:Refering to above example phi can only take in t as a scalar at a time and θ as parameters of the network. 
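For concreteness, a minimal end-to-end sketch of the documented usage, assembled from the test cases added later in this series (the chain size, optimizer settings, and tolerances are the tests' illustrative choices, not requirements):

    using NeuralPDE, Flux, OrdinaryDiffEq, OptimizationOptimisers

    # ODE with a known analytical solution, as in the test suite
    linear = (u, p, t) -> cos(2pi * t)
    linear_analytic = (u, p, t) -> (1 / (2pi)) * sin(2pi * t)
    prob = ODEProblem(ODEFunction(linear, analytic = linear_analytic), 0.0f0,
                      (0.0f0, 1.0f0))

    # "Data" sampled from the analytical solution
    ts = collect(0.0f0:(1.0f0 / 99):1.0f0)
    (u_, t_) = ((1 / (2pi)) .* sin.(2pi .* ts), ts)

    # Mean squared deviation of the trial solution phi from the data
    function additional_loss(phi, θ)
        return sum(sum(abs2, [phi(t, θ) for t in t_] .- u_)) / length(u_)
    end

    chain = Flux.Chain(Dense(1, 5, σ), Dense(5, 1))
    opt = OptimizationOptimisers.Adam(0.1, (0.9, 0.95))
    alg = NeuralPDE.NNODE(chain, opt, strategy = GridTraining(0.01),
                          additional_loss = additional_loss)
    sol = solve(prob, alg, verbose = true, abstol = 1.0f-8, maxiters = 500)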
``` diff --git a/test/NNODE_tests.jl b/test/NNODE_tests.jl index d4dd5b0dd..95edf8eea 100644 --- a/test/NNODE_tests.jl +++ b/test/NNODE_tests.jl @@ -207,103 +207,3 @@ sol = solve(prob, NeuralPDE.NNODE(luxchain, opt; batch = true), verbose = true, maxiters = 400, abstol = 1.0f-8, dt = 1 / 5.0f0) @test sol.errors[:l2] < 0.5 -function f(u, p, t) - [p[1] * u[1] - p[2] * u[1] * u[2], -p[3] * u[2] + p[4] * u[1] * u[2]] -end - - - - -# Checking if additional_loss feature works for NNODE -linear = (u, p, t) -> cos(2pi * t) -linear_analytic = (u, p, t) -> (1 / (2pi)) * sin(2pi * t) -tspan = (0.0f0, 1.0f0) -dt = (tspan[2] - tspan[1]) / 99 -ts = collect(tspan[1]:dt:tspan[2]) -prob = ODEProblem(ODEFunction(linear, analytic = linear_analytic), 0.0f0, (0.0f0, 1.0f0)) -opt = OptimizationOptimisers.Adam(0.1, (0.9, 0.95)) - -# Analytical solution -u_analytical(x) = (1 / (2pi)) .* sin.(2pi .* x) - -# GridTraining (Flux Chain) -chain = Flux.Chain(Dense(1, 5, σ), Dense(5, 1)) - -(u_, t_) = (u_analytical(ts), ts) -function additional_loss(phi, θ) - return sum(sum(abs2, [phi(t, θ) for t in t_] .- u_)) / length(u_) -end - -alg1 = NeuralPDE.NNODE(chain, opt, strategy = GridTraining(0.01), - additional_loss = additional_loss) - -sol1 = solve(prob, alg1, verbose = true, abstol = 1.0f-8, maxiters = 500) -@test sol1.errors[:l2] < 0.5 - -# GridTraining (Lux Chain) -luxchain = Lux.Chain(Lux.Dense(1, 5, Lux.σ), Lux.Dense(5, 1)) - -(u_, t_) = (u_analytical(ts), ts) -function additional_loss(phi, θ) - return sum(sum(abs2, [phi(t, θ) for t in t_] .- u_)) / length(u_) -end - -alg1 = NeuralPDE.NNODE(luxchain, opt, strategy = GridTraining(0.01), - additional_loss = additional_loss) - -sol1 = solve(prob, alg1, verbose = true, abstol = 1.0f-8, maxiters = 500) -@test sol1.errors[:l2] < 0.5 - -# QuadratureTraining (Flux Chain) -chain = Flux.Chain(Dense(1, 5, σ), Dense(5, 1)) - -(u_, t_) = (u_analytical(ts), ts) -function additional_loss(phi, θ) - return sum(sum(abs2, [phi(t, θ) for t in t_] .- u_)) / length(u_) -end - -alg1 = NeuralPDE.NNODE(chain, opt, additional_loss = additional_loss) - -sol1 = solve(prob, alg1, verbose = true, abstol = 1.0f-10, maxiters = 200) -@test sol1.errors[:l2] < 0.5 - -# QuadratureTraining (Lux Chain) -luxchain = Lux.Chain(Lux.Dense(1, 5, Lux.σ), Lux.Dense(5, 1)) - -(u_, t_) = (u_analytical(ts), ts) -function additional_loss(phi, θ) - return sum(sum(abs2, [phi(t, θ) for t in t_] .- u_)) / length(u_) -end - -alg1 = NeuralPDE.NNODE(luxchain, opt, additional_loss = additional_loss) - -sol1 = solve(prob, alg1, verbose = true, abstol = 1.0f-10, maxiters = 200) -@test sol1.errors[:l2] < 0.5 - -# StochasticTraining(Flux Chain) -chain = Flux.Chain(Dense(1, 5, σ), Dense(5, 1)) - -(u_, t_) = (u_analytical(ts), ts) -function additional_loss(phi, θ) - return sum(sum(abs2, [phi(t, θ) for t in t_] .- u_)) / length(u_) -end - -alg1 = NeuralPDE.NNODE(chain, opt, strategy = StochasticTraining(1000), - additional_loss = additional_loss) - -sol1 = solve(prob, alg1, verbose = true, abstol = 1.0f-8, maxiters = 500) -@test sol1.errors[:l2] < 0.5 - -# StochasticTraining (Lux Chain) -luxchain = Lux.Chain(Lux.Dense(1, 5, Lux.σ), Lux.Dense(5, 1)) - -(u_, t_) = (u_analytical(ts), ts) -function additional_loss(phi, θ) - return sum(sum(abs2, [phi(t, θ) for t in t_] .- u_)) / length(u_) -end - -alg1 = NeuralPDE.NNODE(luxchain, opt, strategy = StochasticTraining(1000), - additional_loss = additional_loss) - -sol1 = solve(prob, alg1, verbose = true, abstol = 1.0f-8, maxiters = 500) -@test sol1.errors[:l2] < 0.5 From 
96f20bdad3e58fe44f2c7aed40d960733d100f5d Mon Sep 17 00:00:00 2001 From: Astitva Aggarwal Date: Sun, 2 Apr 2023 03:34:08 +0530 Subject: [PATCH 34/53] formatted files --- test/NNODE_tests.jl | 94 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 94 insertions(+) diff --git a/test/NNODE_tests.jl b/test/NNODE_tests.jl index 95edf8eea..1f494ca7b 100644 --- a/test/NNODE_tests.jl +++ b/test/NNODE_tests.jl @@ -207,3 +207,97 @@ sol = solve(prob, NeuralPDE.NNODE(luxchain, opt; batch = true), verbose = true, maxiters = 400, abstol = 1.0f-8, dt = 1 / 5.0f0) @test sol.errors[:l2] < 0.5 + +# Checking if additional_loss feature works for NNODE +linear = (u, p, t) -> cos(2pi * t) +linear_analytic = (u, p, t) -> (1 / (2pi)) * sin(2pi * t) +tspan = (0.0f0, 1.0f0) +dt = (tspan[2] - tspan[1]) / 99 +ts = collect(tspan[1]:dt:tspan[2]) +prob = ODEProblem(ODEFunction(linear, analytic = linear_analytic), 0.0f0, (0.0f0, 1.0f0)) +opt = OptimizationOptimisers.Adam(0.1, (0.9, 0.95)) + +# Analytical solution +u_analytical(x) = (1 / (2pi)) .* sin.(2pi .* x) + +# GridTraining (Flux Chain) +chain = Flux.Chain(Dense(1, 5, σ), Dense(5, 1)) + +(u_, t_) = (u_analytical(ts), ts) +function additional_loss(phi, θ) + return sum(sum(abs2, [phi(t, θ) for t in t_] .- u_)) / length(u_) +end + +alg1 = NeuralPDE.NNODE(chain, opt, strategy = GridTraining(0.01), + additional_loss = additional_loss) + +sol1 = solve(prob, alg1, verbose = true, abstol = 1.0f-8, maxiters = 500) +@test sol1.errors[:l2] < 0.5 + +# GridTraining (Lux Chain) +luxchain = Lux.Chain(Lux.Dense(1, 5, Lux.σ), Lux.Dense(5, 1)) + +(u_, t_) = (u_analytical(ts), ts) +function additional_loss(phi, θ) + return sum(sum(abs2, [phi(t, θ) for t in t_] .- u_)) / length(u_) +end + +alg1 = NeuralPDE.NNODE(luxchain, opt, strategy = GridTraining(0.01), + additional_loss = additional_loss) + +sol1 = solve(prob, alg1, verbose = true, abstol = 1.0f-8, maxiters = 500) +@test sol1.errors[:l2] < 0.5 + +# QuadratureTraining (Flux Chain) +chain = Flux.Chain(Dense(1, 5, σ), Dense(5, 1)) + +(u_, t_) = (u_analytical(ts), ts) +function additional_loss(phi, θ) + return sum(sum(abs2, [phi(t, θ) for t in t_] .- u_)) / length(u_) +end + +alg1 = NeuralPDE.NNODE(chain, opt, additional_loss = additional_loss) + +sol1 = solve(prob, alg1, verbose = true, abstol = 1.0f-10, maxiters = 200) +@test sol1.errors[:l2] < 0.5 + +# QuadratureTraining (Lux Chain) +luxchain = Lux.Chain(Lux.Dense(1, 5, Lux.σ), Lux.Dense(5, 1)) + +(u_, t_) = (u_analytical(ts), ts) +function additional_loss(phi, θ) + return sum(sum(abs2, [phi(t, θ) for t in t_] .- u_)) / length(u_) +end + +alg1 = NeuralPDE.NNODE(luxchain, opt, additional_loss = additional_loss) + +sol1 = solve(prob, alg1, verbose = true, abstol = 1.0f-10, maxiters = 200) +@test sol1.errors[:l2] < 0.5 + +# StochasticTraining(Flux Chain) +chain = Flux.Chain(Dense(1, 5, σ), Dense(5, 1)) + +(u_, t_) = (u_analytical(ts), ts) +function additional_loss(phi, θ) + return sum(sum(abs2, [phi(t, θ) for t in t_] .- u_)) / length(u_) +end + +alg1 = NeuralPDE.NNODE(chain, opt, strategy = StochasticTraining(1000), + additional_loss = additional_loss) + +sol1 = solve(prob, alg1, verbose = true, abstol = 1.0f-8, maxiters = 500) +@test sol1.errors[:l2] < 0.5 + +# StochasticTraining (Lux Chain) +luxchain = Lux.Chain(Lux.Dense(1, 5, Lux.σ), Lux.Dense(5, 1)) + +(u_, t_) = (u_analytical(ts), ts) +function additional_loss(phi, θ) + return sum(sum(abs2, [phi(t, θ) for t in t_] .- u_)) / length(u_) +end + +alg1 = NeuralPDE.NNODE(luxchain, opt, strategy = StochasticTraining(1000), + 
additional_loss = additional_loss) + +sol1 = solve(prob, alg1, verbose = true, abstol = 1.0f-8, maxiters = 500) +@test sol1.errors[:l2] < 0.5 From 08b6c6a497ab61f725fe9358673c36358c74e4d3 Mon Sep 17 00:00:00 2001 From: Astitva Aggarwal Date: Sun, 2 Apr 2023 14:04:53 +0530 Subject: [PATCH 35/53] docs --- docs/src/manual/ode.md | 18 ------------------ 1 file changed, 18 deletions(-) diff --git a/docs/src/manual/ode.md b/docs/src/manual/ode.md index b8fe44b92..0cd20d406 100644 --- a/docs/src/manual/ode.md +++ b/docs/src/manual/ode.md @@ -2,22 +2,4 @@ ```@docs NNODE - -additional_loss as an argument for NNODE algorithm: -Any function which computes the additional_loss can be passed as an argument to NNODE algorithm call. - -example: -(u_, t_) = (u_analytical(sol.t), sol.t) -function additional_loss(phi, θ) - return sum(sum(abs2, [phi(t, θ) for t in t_] .- u_)) / length(u_) -end - -alg1 = NeuralPDE.NNODE(chain, opt, strategy=StochasticTraining(1000), additional_loss=additional_loss) - -Here we define the additional loss function additional_loss(phi, θ ), the function has two arguments: - -phi the trial solution -θ the parameters of neural networks - -Note:Refering to above example phi can only take in t as a scalar at a time and θ as parameters of the network. ``` From 8e0b7c85a948797ee1bfbb9fb124db5ec1de9c64 Mon Sep 17 00:00:00 2001 From: Astitva Aggarwal Date: Sun, 2 Apr 2023 22:36:00 +0530 Subject: [PATCH 36/53] Mixed my PR with PR #635 by sdesai1287 --- src/ode_solve.jl | 33 +++++++++++++++++++++++++++++++++ test/NNODE_tests.jl | 23 ++++++++++++++++++++++- 2 files changed, 55 insertions(+), 1 deletion(-) diff --git a/src/ode_solve.jl b/src/ode_solve.jl index 968de617e..418ef3351 100644 --- a/src/ode_solve.jl +++ b/src/ode_solve.jl @@ -293,6 +293,39 @@ function generate_loss(strategy::StochasticTraining, phi, f, autodiff::Bool, tsp return loss end +function generate_loss(strategy::WeightedIntervalTraining, phi, f, autodiff::Bool, tspan, p, + batch) + minT = tspan[1] + maxT = tspan[2] + + weights = strategy.weights ./ sum(strategy.weights) + + N = length(weights) + samples = strategy.samples + + difference = (maxT - minT) / N + + data = Float64[] + for (index, item) in enumerate(weights) + temp_data = rand(1, trunc(Int, samples * item)) .* difference .+ minT .+ + ((index - 1) * difference) + data = append!(data, temp_data) + end + + ts = data + + function loss(θ, _) + if batch + sum(abs2, inner_loss(phi, f, autodiff, ts, θ, p)) + else + sum(abs2, [inner_loss(phi, f, autodiff, t, θ, p) for t in ts]) + end + end + + return loss + optf = OptimizationFunction(loss, Optimization.AutoZygote()) +end + function generate_loss(strategy::QuasiRandomTraining, phi, f, autodiff::Bool, tspan, additional_loss) error("QuasiRandomTraining is not supported by NNODE since it's for high dimensional spaces only. 
Use StochasticTraining instead.") diff --git a/test/NNODE_tests.jl b/test/NNODE_tests.jl index 1f494ca7b..c04f3300b 100644 --- a/test/NNODE_tests.jl +++ b/test/NNODE_tests.jl @@ -1,7 +1,6 @@ using Test, Flux using Random, NeuralPDE using OrdinaryDiffEq, Optimisers, Statistics -using OrdinaryDiffEq, Optimisers, Statistics import Lux, OptimizationOptimisers, OptimizationOptimJL Random.seed!(100) @@ -208,6 +207,28 @@ sol = solve(prob, NeuralPDE.NNODE(luxchain, opt; batch = true), verbose = true, abstol = 1.0f-8, dt = 1 / 5.0f0) @test sol.errors[:l2] < 0.5 +function f(u, p, t) + [p[1] * u[1] - p[2] * u[1] * u[2], -p[3] * u[2] + p[4] * u[1] * u[2]] +end + +p = [1.5, 1.0, 3.0, 1.0] +u0 = [1.0, 1.0] +prob_oop = ODEProblem{false}(f, u0, (0.0, 3.0), p) +true_sol = solve(prob_oop, Tsit5(), saveat = 0.01) +func = Lux.σ +N = 12 +chain = Lux.Chain(Lux.Dense(1, N, func), Lux.Dense(N, N, func), Lux.Dense(N, N, func), + Lux.Dense(N, N, func), Lux.Dense(N, length(u0))) + +opt = Optimisers.Adam(0.01) +weights = [0.7, 0.2, 0.1] +samples = 200 +alg = NeuralPDE.NNODE(chain, opt, autodiff = false, + strategy = NeuralPDE.WeightedIntervalTraining(weights, samples)) +sol = solve(prob_oop, alg, verbose = true, maxiters = 100000, saveat = 0.01) + +@test abs(mean(sol) - mean(true_sol)) < 0.2 + # Checking if additional_loss feature works for NNODE linear = (u, p, t) -> cos(2pi * t) linear_analytic = (u, p, t) -> (1 / (2pi)) * sin(2pi * t) From 5b2ee57e464c81b68b516cafe5678c32f82b969b Mon Sep 17 00:00:00 2001 From: Astitva Aggarwal Date: Sun, 2 Apr 2023 22:42:21 +0530 Subject: [PATCH 37/53] fixed optimizations choice --- src/ode_solve.jl | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/ode_solve.jl b/src/ode_solve.jl index 418ef3351..648beca04 100644 --- a/src/ode_solve.jl +++ b/src/ode_solve.jl @@ -323,7 +323,6 @@ function generate_loss(strategy::WeightedIntervalTraining, phi, f, autodiff::Boo end return loss - optf = OptimizationFunction(loss, Optimization.AutoZygote()) end function generate_loss(strategy::QuasiRandomTraining, phi, f, autodiff::Bool, tspan, @@ -436,6 +435,8 @@ function DiffEqBase.__solve(prob::DiffEqBase.AbstractODEProblem, Optimization.AutoForwardDiff() elseif strategy isa StochasticTraining Optimization.AutoZygote() + elseif strategy isa WeightedIntervalTraining + Optimization.AutoZygote() else # by default GridTraining choice of Optimization # if adding new training algorithms we can extend this, From d1216d44d7a12d7869dff088a9fdd659e58aaf22 Mon Sep 17 00:00:00 2001 From: Astitva Aggarwal Date: Mon, 3 Apr 2023 20:18:48 +0530 Subject: [PATCH 38/53] rebased --- test/NNODE_tests.jl | 1 + 1 file changed, 1 insertion(+) diff --git a/test/NNODE_tests.jl b/test/NNODE_tests.jl index c04f3300b..1839f57c7 100644 --- a/test/NNODE_tests.jl +++ b/test/NNODE_tests.jl @@ -207,6 +207,7 @@ sol = solve(prob, NeuralPDE.NNODE(luxchain, opt; batch = true), verbose = true, abstol = 1.0f-8, dt = 1 / 5.0f0) @test sol.errors[:l2] < 0.5 +# WeightedIntervalTraining(Lux Chain) function f(u, p, t) [p[1] * u[1] - p[2] * u[1] * u[2], -p[3] * u[2] + p[4] * u[1] * u[2]] end From f369613fcfd209a87135d491f856fc733558d14d Mon Sep 17 00:00:00 2001 From: Astitva Aggarwal Date: Tue, 4 Apr 2023 02:05:47 +0530 Subject: [PATCH 39/53] Actually performed Rebase and Formatted some files --- src/ode_solve.jl | 35 +++++++++++++++++++++++++++++++++-- 1 file changed, 33 insertions(+), 2 deletions(-) diff --git a/src/ode_solve.jl b/src/ode_solve.jl index 648beca04..6e1a56a42 100644 --- a/src/ode_solve.jl +++ 
b/src/ode_solve.jl @@ -325,8 +325,39 @@ function generate_loss(strategy::WeightedIntervalTraining, phi, f, autodiff::Boo return loss end -function generate_loss(strategy::QuasiRandomTraining, phi, f, autodiff::Bool, tspan, - additional_loss) +function generate_loss(strategy::WeightedIntervalTraining, phi, f, autodiff::Bool, tspan, p, + batch) + minT = tspan[1] + maxT = tspan[2] + + weights = strategy.weights ./ sum(strategy.weights) + + N = length(weights) + samples = strategy.samples + + difference = (maxT - minT) / N + + data = Float64[] + for (index, item) in enumerate(weights) + temp_data = rand(1, trunc(Int, samples * item)) .* difference .+ minT .+ + ((index - 1) * difference) + data = append!(data, temp_data) + end + + ts = data + + function loss(θ, _) + if batch + sum(abs2, inner_loss(phi, f, autodiff, ts, θ, p)) + else + sum(abs2, [inner_loss(phi, f, autodiff, t, θ, p) for t in ts]) + end + end + + return loss +end + +function generate_loss(strategy::QuasiRandomTraining, phi, f, autodiff::Bool, tspan) error("QuasiRandomTraining is not supported by NNODE since it's for high dimensional spaces only. Use StochasticTraining instead.") end From 2f625f65f720facc1ad9555435e19982e6f39c8f Mon Sep 17 00:00:00 2001 From: Astitva Aggarwal Date: Tue, 4 Apr 2023 13:38:42 +0530 Subject: [PATCH 40/53] fixed ode_solve.jl rebase issues --- src/ode_solve.jl | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/ode_solve.jl b/src/ode_solve.jl index 6e1a56a42..941617d5a 100644 --- a/src/ode_solve.jl +++ b/src/ode_solve.jl @@ -70,8 +70,9 @@ is an accurate interpolation (up to the neural network training result). In addi Lagaris, Isaac E., Aristidis Likas, and Dimitrios I. Fotiadis. "Artificial neural networks for solving ordinary and partial differential equations." IEEE Transactions on Neural Networks 9, no. 5 (1998): 987-1000. """ -struct NNODE{C, O, P, B, K, S <: Union{Nothing, AbstractTrainingStrategy}, - AL <: Union{Nothing, Function}} <: +struct NNODE{C, O, P, B, K, AL <: Union{Nothing, Function}, + S <: Union{Nothing, AbstractTrainingStrategy} + } <: NeuralPDEAlgorithm chain::C opt::O From e3938ed8f1d2fd280bcb4904f18e2344148e6a2f Mon Sep 17 00:00:00 2001 From: Astitva Aggarwal Date: Tue, 4 Apr 2023 15:36:06 +0530 Subject: [PATCH 41/53] . --- src/ode_solve.jl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/ode_solve.jl b/src/ode_solve.jl index 941617d5a..f4e45be2c 100644 --- a/src/ode_solve.jl +++ b/src/ode_solve.jl @@ -85,8 +85,8 @@ struct NNODE{C, O, P, B, K, AL <: Union{Nothing, Function}, end function NNODE(chain, opt, init_params = nothing; strategy = nothing, - autodiff = false, batch = nothing, kwargs...) - NNODE(chain, opt, init_params, autodiff, batch, strategy, kwargs) + autodiff = false, batch = nothing, additional_loss = nothing, kwargs...) 
+ NNODE(chain, opt, init_params, autodiff, batch, strategy, additional_loss, kwargs) end """ From 44a3e906b14eef51df6ef1b867fd910ec7ad9f1e Mon Sep 17 00:00:00 2001 From: Astitva Aggarwal Date: Thu, 30 Mar 2023 15:09:24 +0530 Subject: [PATCH 42/53] added additional loss against data for NNODE(still needs reviewing) --- src/ode_solve.jl | 21 ++++++++++++++------- 1 file changed, 14 insertions(+), 7 deletions(-) diff --git a/src/ode_solve.jl b/src/ode_solve.jl index f4e45be2c..907a9e30d 100644 --- a/src/ode_solve.jl +++ b/src/ode_solve.jl @@ -82,10 +82,11 @@ struct NNODE{C, O, P, B, K, AL <: Union{Nothing, Function}, strategy::S additional_loss::AL kwargs::K + additional_loss::AL end function NNODE(chain, opt, init_params = nothing; strategy = nothing, - autodiff = false, batch = nothing, additional_loss = nothing, kwargs...) + autodiff = false, batch = nothing,additional_loss = nothing, kwargs...) NNODE(chain, opt, init_params, autodiff, batch, strategy, additional_loss, kwargs) end @@ -249,7 +250,7 @@ end Representation of the loss function, parametric on the training strategy `strategy` """ function generate_loss(strategy::QuadratureTraining, phi, f, autodiff::Bool, tspan, p, - batch) + batch, additional_loss = nothing) integrand(t::Number, θ) = abs2(inner_loss(phi, f, autodiff, t, θ, p)) integrand(ts, θ) = [abs2(inner_loss(phi, f, autodiff, t, θ, p)) for t in ts] @assert batch == 0 # not implemented @@ -263,7 +264,8 @@ function generate_loss(strategy::QuadratureTraining, phi, f, autodiff::Bool, tsp return loss end -function generate_loss(strategy::GridTraining, phi, f, autodiff::Bool, tspan, p, batch) +function generate_loss(strategy::GridTraining, phi, f, autodiff::Bool, tspan, p, batch, + additional_loss = nothing) ts = tspan[1]:(strategy.dx):tspan[2] # sum(abs2,inner_loss(t,θ) for t in ts) but Zygote generators are broken @@ -274,12 +276,11 @@ function generate_loss(strategy::GridTraining, phi, f, autodiff::Bool, tspan, p, sum(abs2, [inner_loss(phi, f, autodiff, t, θ, p) for t in ts]) end end - return loss end function generate_loss(strategy::StochasticTraining, phi, f, autodiff::Bool, tspan, p, - batch) + batch, additional_loss = nothing) # sum(abs2,inner_loss(t,θ) for t in ts) but Zygote generators are broken function loss(θ, _) ts = adapt(parameterless_type(θ), @@ -358,7 +359,8 @@ function generate_loss(strategy::WeightedIntervalTraining, phi, f, autodiff::Boo return loss end -function generate_loss(strategy::QuasiRandomTraining, phi, f, autodiff::Bool, tspan) +function generate_loss(strategy::QuasiRandomTraining, phi, f, autodiff::Bool, tspan, + additional_loss = nothing) error("QuasiRandomTraining is not supported by NNODE since it's for high dimensional spaces only. 
Use StochasticTraining instead.") end @@ -450,12 +452,17 @@ function DiffEqBase.__solve(prob::DiffEqBase.AbstractODEProblem, alg.batch end + + # additional loss additional_loss = alg.additional_loss # Computes total_loss function total_loss(θ, _) - L2_loss = generate_loss(strategy, phi, f, autodiff::Bool, tspan, p, batch)(θ, phi) + L2_loss = generate_loss(strategy, phi, f, autodiff::Bool, tspan, p, batch, + additional_loss) + + # optf = generate_loss(strategy, phi, f, autodiff::Bool, tspan, p, batch)(θ, phi) if !(additional_loss isa Nothing) return additional_loss(phi, θ) + L2_loss end From f23b14147aa41a9a9285e426657ceaac931d40c4 Mon Sep 17 00:00:00 2001 From: Astitva Aggarwal Date: Sat, 1 Apr 2023 13:24:56 +0530 Subject: [PATCH 43/53] Added additional Loss function Feature for NNODE (user defined function), also now has common OptimizationFunction object definition(instead for each TrainingStrategy). --- src/ode_solve.jl | 22 ++++++++++------------ 1 file changed, 10 insertions(+), 12 deletions(-) diff --git a/src/ode_solve.jl b/src/ode_solve.jl index 907a9e30d..ef51a9872 100644 --- a/src/ode_solve.jl +++ b/src/ode_solve.jl @@ -82,7 +82,6 @@ struct NNODE{C, O, P, B, K, AL <: Union{Nothing, Function}, strategy::S additional_loss::AL kwargs::K - additional_loss::AL end function NNODE(chain, opt, init_params = nothing; strategy = nothing, @@ -250,7 +249,7 @@ end Representation of the loss function, parametric on the training strategy `strategy` """ function generate_loss(strategy::QuadratureTraining, phi, f, autodiff::Bool, tspan, p, - batch, additional_loss = nothing) + batch) integrand(t::Number, θ) = abs2(inner_loss(phi, f, autodiff, t, θ, p)) integrand(ts, θ) = [abs2(inner_loss(phi, f, autodiff, t, θ, p)) for t in ts] @assert batch == 0 # not implemented @@ -264,8 +263,7 @@ function generate_loss(strategy::QuadratureTraining, phi, f, autodiff::Bool, tsp return loss end -function generate_loss(strategy::GridTraining, phi, f, autodiff::Bool, tspan, p, batch, - additional_loss = nothing) +function generate_loss(strategy::GridTraining, phi, f, autodiff::Bool, tspan, p, batch) ts = tspan[1]:(strategy.dx):tspan[2] # sum(abs2,inner_loss(t,θ) for t in ts) but Zygote generators are broken @@ -276,11 +274,12 @@ function generate_loss(strategy::GridTraining, phi, f, autodiff::Bool, tspan, p, sum(abs2, [inner_loss(phi, f, autodiff, t, θ, p) for t in ts]) end end + return loss end function generate_loss(strategy::StochasticTraining, phi, f, autodiff::Bool, tspan, p, - batch, additional_loss = nothing) + batch) # sum(abs2,inner_loss(t,θ) for t in ts) but Zygote generators are broken function loss(θ, _) ts = adapt(parameterless_type(θ), @@ -360,7 +359,7 @@ function generate_loss(strategy::WeightedIntervalTraining, phi, f, autodiff::Boo end function generate_loss(strategy::QuasiRandomTraining, phi, f, autodiff::Bool, tspan, - additional_loss = nothing) + additional_loss) error("QuasiRandomTraining is not supported by NNODE since it's for high dimensional spaces only. 
Use StochasticTraining instead.")
end

@@ -453,22 +452,21 @@ function DiffEqBase.__solve(prob::DiffEqBase.AbstractODEProblem,
     end
 
-    # additional loss
     additional_loss = alg.additional_loss
 
-    # Computes total_loss
-    function total_loss(θ, _)
-        L2_loss = generate_loss(strategy, phi, f, autodiff::Bool, tspan, p, batch,
-                                additional_loss)
+
 
-        # optf = generate_loss(strategy, phi, f, autodiff::Bool, tspan, p, batch)(θ, phi)
+    # Creates OptimizationFunction Object from total_loss
+    optf = OptimizationFunction(total_loss, opt_algo)(θ, phi)
         if !(additional_loss isa Nothing)
             return additional_loss(phi, θ) + L2_loss
         end
         L2_loss
     end
+
+
     # Choice of Optimization Algo for Training Strategies
     opt_algo = if strategy isa QuadratureTraining
         Optimization.AutoForwardDiff()

From f47159e27a37d6bcb79c4b262136bc6cc2bc5da3 Mon Sep 17 00:00:00 2001
From: Astitva Aggarwal
Date: Sun, 2 Apr 2023 02:46:18 +0530
Subject: [PATCH 44/53] Added Tests for additional_loss function feature. Also
 updated docs (might need further editing). Future scope: we can add weighted
 loss in NNODE, and even losses for parameter estimation in inverse problems.
---
 docs/src/manual/ode.md | 18 ++++++++++++++++++
 1 file changed, 18 insertions(+)

diff --git a/docs/src/manual/ode.md b/docs/src/manual/ode.md
index 0cd20d406..b8fe44b92 100644
--- a/docs/src/manual/ode.md
+++ b/docs/src/manual/ode.md
@@ -2,4 +2,22 @@
 
 ```@docs
 NNODE
+
+additional_loss as an argument for the NNODE algorithm:
+Any function that computes an additional loss can be passed to the NNODE algorithm call via the additional_loss keyword argument.
+
+example:
+(u_, t_) = (u_analytical(sol.t), sol.t)
+function additional_loss(phi, θ)
+    return sum(sum(abs2, [phi(t, θ) for t in t_] .- u_)) / length(u_)
+end
+
+alg1 = NeuralPDE.NNODE(chain, opt, strategy = StochasticTraining(1000), additional_loss = additional_loss)
+
+Here we define the additional loss function additional_loss(phi, θ); the function has two arguments:
+
+phi: the trial solution
+θ: the parameters of the neural network(s)
+
+Note: Referring to the example above, phi can only take in t as a scalar at a time, with θ as the parameters of the network.
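Internally, per the __solve rework in the hunk above, the user-supplied additional_loss is simply added onto the strategy's physics residual loss before the OptimizationFunction is built. Roughly, as a sketch with illustrative names (not the literal source, which inlines this closure in __solve):

    # inner_f: the strategy-specific L2 residual loss returned by generate_loss
    # additional_loss: the user callback, or nothing if none was given
    function make_total_loss(inner_f, additional_loss, phi)
        function total_loss(θ, _)
            L2_loss = inner_f(θ, phi)
            additional_loss isa Nothing ? L2_loss : L2_loss + additional_loss(phi, θ)
        end
        return total_loss
    end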
``` From 18439e785c1d1fc49333133b670a2632a64c6be9 Mon Sep 17 00:00:00 2001 From: Astitva Aggarwal Date: Sun, 2 Apr 2023 22:36:00 +0530 Subject: [PATCH 46/53] Mixed my PR with PR #635 by sdesai1287 --- src/ode_solve.jl | 33 +++++++++++++++++++++++++++++++++ test/NNODE_tests.jl | 2 +- 2 files changed, 34 insertions(+), 1 deletion(-) diff --git a/src/ode_solve.jl b/src/ode_solve.jl index ef51a9872..3de9d052f 100644 --- a/src/ode_solve.jl +++ b/src/ode_solve.jl @@ -358,6 +358,39 @@ function generate_loss(strategy::WeightedIntervalTraining, phi, f, autodiff::Boo return loss end +function generate_loss(strategy::WeightedIntervalTraining, phi, f, autodiff::Bool, tspan, p, + batch) + minT = tspan[1] + maxT = tspan[2] + + weights = strategy.weights ./ sum(strategy.weights) + + N = length(weights) + samples = strategy.samples + + difference = (maxT - minT) / N + + data = Float64[] + for (index, item) in enumerate(weights) + temp_data = rand(1, trunc(Int, samples * item)) .* difference .+ minT .+ + ((index - 1) * difference) + data = append!(data, temp_data) + end + + ts = data + + function loss(θ, _) + if batch + sum(abs2, inner_loss(phi, f, autodiff, ts, θ, p)) + else + sum(abs2, [inner_loss(phi, f, autodiff, t, θ, p) for t in ts]) + end + end + + return loss + optf = OptimizationFunction(loss, Optimization.AutoZygote()) +end + function generate_loss(strategy::QuasiRandomTraining, phi, f, autodiff::Bool, tspan, additional_loss) error("QuasiRandomTraining is not supported by NNODE since it's for high dimensional spaces only. Use StochasticTraining instead.") diff --git a/test/NNODE_tests.jl b/test/NNODE_tests.jl index 1839f57c7..ffd082e6f 100644 --- a/test/NNODE_tests.jl +++ b/test/NNODE_tests.jl @@ -205,7 +205,7 @@ sol = solve(prob, NeuralPDE.NNODE(luxchain, opt; batch = false), verbose = true, sol = solve(prob, NeuralPDE.NNODE(luxchain, opt; batch = true), verbose = true, maxiters = 400, abstol = 1.0f-8, dt = 1 / 5.0f0) -@test sol.errors[:l2] < 0.5 +@test sol.errors[:l2] # WeightedIntervalTraining(Lux Chain) function f(u, p, t) From c0f397e29491f38e1f4dc3ef911e21d6cfff79b3 Mon Sep 17 00:00:00 2001 From: Astitva Aggarwal Date: Sun, 2 Apr 2023 22:42:21 +0530 Subject: [PATCH 47/53] fixed optimizations choice --- src/ode_solve.jl | 1 - 1 file changed, 1 deletion(-) diff --git a/src/ode_solve.jl b/src/ode_solve.jl index 3de9d052f..ff69d1d6f 100644 --- a/src/ode_solve.jl +++ b/src/ode_solve.jl @@ -388,7 +388,6 @@ function generate_loss(strategy::WeightedIntervalTraining, phi, f, autodiff::Boo end return loss - optf = OptimizationFunction(loss, Optimization.AutoZygote()) end function generate_loss(strategy::QuasiRandomTraining, phi, f, autodiff::Bool, tspan, From 43d9efb2e02fd7cdffa841a9b6d32f359b1e0f9a Mon Sep 17 00:00:00 2001 From: Astitva Aggarwal Date: Tue, 4 Apr 2023 02:05:47 +0530 Subject: [PATCH 48/53] Actually performed Rebase and Formatted some files --- src/ode_solve.jl | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/src/ode_solve.jl b/src/ode_solve.jl index ff69d1d6f..86a35fd3e 100644 --- a/src/ode_solve.jl +++ b/src/ode_solve.jl @@ -389,9 +389,7 @@ function generate_loss(strategy::WeightedIntervalTraining, phi, f, autodiff::Boo return loss end - -function generate_loss(strategy::QuasiRandomTraining, phi, f, autodiff::Bool, tspan, - additional_loss) +function generate_loss(strategy::QuasiRandomTraining, phi, f, autodiff::Bool, tspan) error("QuasiRandomTraining is not supported by NNODE since it's for high dimensional spaces only. 
Use StochasticTraining instead.") end From b6b31b2bd8facd2c73d8ba66322ddf496307c3d2 Mon Sep 17 00:00:00 2001 From: Astitva Aggarwal Date: Tue, 4 Apr 2023 18:40:08 +0530 Subject: [PATCH 49/53] rebase Fr Fr --- src/ode_solve.jl | 68 ------------------------------------------------ 1 file changed, 68 deletions(-) diff --git a/src/ode_solve.jl b/src/ode_solve.jl index 86a35fd3e..8f3e033aa 100644 --- a/src/ode_solve.jl +++ b/src/ode_solve.jl @@ -326,69 +326,6 @@ function generate_loss(strategy::WeightedIntervalTraining, phi, f, autodiff::Boo return loss end -function generate_loss(strategy::WeightedIntervalTraining, phi, f, autodiff::Bool, tspan, p, - batch) - minT = tspan[1] - maxT = tspan[2] - - weights = strategy.weights ./ sum(strategy.weights) - - N = length(weights) - samples = strategy.samples - - difference = (maxT - minT) / N - - data = Float64[] - for (index, item) in enumerate(weights) - temp_data = rand(1, trunc(Int, samples * item)) .* difference .+ minT .+ - ((index - 1) * difference) - data = append!(data, temp_data) - end - - ts = data - - function loss(θ, _) - if batch - sum(abs2, inner_loss(phi, f, autodiff, ts, θ, p)) - else - sum(abs2, [inner_loss(phi, f, autodiff, t, θ, p) for t in ts]) - end - end - - return loss -end - -function generate_loss(strategy::WeightedIntervalTraining, phi, f, autodiff::Bool, tspan, p, - batch) - minT = tspan[1] - maxT = tspan[2] - - weights = strategy.weights ./ sum(strategy.weights) - - N = length(weights) - samples = strategy.samples - - difference = (maxT - minT) / N - - data = Float64[] - for (index, item) in enumerate(weights) - temp_data = rand(1, trunc(Int, samples * item)) .* difference .+ minT .+ - ((index - 1) * difference) - data = append!(data, temp_data) - end - - ts = data - - function loss(θ, _) - if batch - sum(abs2, inner_loss(phi, f, autodiff, ts, θ, p)) - else - sum(abs2, [inner_loss(phi, f, autodiff, t, θ, p) for t in ts]) - end - end - - return loss -end function generate_loss(strategy::QuasiRandomTraining, phi, f, autodiff::Bool, tspan) error("QuasiRandomTraining is not supported by NNODE since it's for high dimensional spaces only. Use StochasticTraining instead.") end @@ -481,12 +418,9 @@ function DiffEqBase.__solve(prob::DiffEqBase.AbstractODEProblem, alg.batch end - # additional loss additional_loss = alg.additional_loss - - # Creates OptimizationFunction Object from total_loss optf = OptimizationFunction(total_loss, opt_algo)(θ, phi) if !(additional_loss isa Nothing) @@ -495,8 +429,6 @@ function DiffEqBase.__solve(prob::DiffEqBase.AbstractODEProblem, L2_loss end - - # Choice of Optimization Algo for Training Strategies opt_algo = if strategy isa QuadratureTraining Optimization.AutoForwardDiff() From 09c9cf8dd5a50edf1173268750dfdd483811ae9d Mon Sep 17 00:00:00 2001 From: Astitva Aggarwal Date: Tue, 4 Apr 2023 18:41:19 +0530 Subject: [PATCH 50/53] stuff happened --- src/ode_solve.jl | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/ode_solve.jl b/src/ode_solve.jl index 8f3e033aa..10ecc9aa1 100644 --- a/src/ode_solve.jl +++ b/src/ode_solve.jl @@ -3,7 +3,7 @@ abstract type NeuralPDEAlgorithm <: DiffEqBase.AbstractODEAlgorithm end """ ```julia NNODE(chain, opt=OptimizationPolyalgorithms.PolyOpt(), init_params = nothing; - autodiff=false, batch=0,additional_loss=nothing kwargs...) + autodiff=false, batch=0,additional_loss=nothing,kwargs...) ``` Algorithm for solving ordinary differential equations using a neural network. 
This is a specialization @@ -85,7 +85,7 @@ struct NNODE{C, O, P, B, K, AL <: Union{Nothing, Function}, end function NNODE(chain, opt, init_params = nothing; strategy = nothing, - autodiff = false, batch = nothing,additional_loss = nothing, kwargs...) + autodiff = false, batch = nothing, additional_loss = nothing, kwargs...) NNODE(chain, opt, init_params, autodiff, batch, strategy, additional_loss, kwargs) end @@ -422,7 +422,8 @@ function DiffEqBase.__solve(prob::DiffEqBase.AbstractODEProblem, additional_loss = alg.additional_loss # Creates OptimizationFunction Object from total_loss - optf = OptimizationFunction(total_loss, opt_algo)(θ, phi) + function total_loss(θ, _) + L2_loss = generate_loss(strategy, phi, f, autodiff, tspan, p, batch)(θ, phi) if !(additional_loss isa Nothing) return additional_loss(phi, θ) + L2_loss end From 64f1464f7724a0f427aabcbefb8c3591375a5920 Mon Sep 17 00:00:00 2001 From: Astitva Aggarwal Date: Tue, 4 Apr 2023 18:58:47 +0530 Subject: [PATCH 51/53] Fixed Tests line 208 NNODE_tests --- test/NNODE_tests.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/NNODE_tests.jl b/test/NNODE_tests.jl index ffd082e6f..1839f57c7 100644 --- a/test/NNODE_tests.jl +++ b/test/NNODE_tests.jl @@ -205,7 +205,7 @@ sol = solve(prob, NeuralPDE.NNODE(luxchain, opt; batch = false), verbose = true, sol = solve(prob, NeuralPDE.NNODE(luxchain, opt; batch = true), verbose = true, maxiters = 400, abstol = 1.0f-8, dt = 1 / 5.0f0) -@test sol.errors[:l2] +@test sol.errors[:l2] < 0.5 # WeightedIntervalTraining(Lux Chain) function f(u, p, t) From a2eef3d1609dd8c03ebd422938c4944da7a61c4c Mon Sep 17 00:00:00 2001 From: Astitva Aggarwal Date: Tue, 4 Apr 2023 19:43:16 +0530 Subject: [PATCH 52/53] changes from review --- src/ode_solve.jl | 24 +++++++++++------------- 1 file changed, 11 insertions(+), 13 deletions(-) diff --git a/src/ode_solve.jl b/src/ode_solve.jl index 10ecc9aa1..1c3d7afb3 100644 --- a/src/ode_solve.jl +++ b/src/ode_solve.jl @@ -3,7 +3,8 @@ abstract type NeuralPDEAlgorithm <: DiffEqBase.AbstractODEAlgorithm end """ ```julia NNODE(chain, opt=OptimizationPolyalgorithms.PolyOpt(), init_params = nothing; - autodiff=false, batch=0,additional_loss=nothing,kwargs...) + autodiff=false, batch=0,additional_loss=nothing, + kwargs...) ``` Algorithm for solving ordinary differential equations using a neural network. This is a specialization @@ -25,12 +26,18 @@ of the physics-informed neural network which is used as a solver for a standard ## Keyword Arguments * `additional_loss`: A function additional_loss(phi, θ) where phi are the neural network trial solutions, θ are the weights of the neural network(s). -example: + +## Example + +```julia ts=[t for t in 1:100] (u_, t_) = (analytical_func(ts), ts) function additional_loss(phi, θ) return sum(sum(abs2, [phi(t, θ) for t in t_] .- u_)) / length(u_) end + alg = NeuralPDE.NNODE(chain, opt, additional_loss = additional_loss) +``` + * `autodiff`: The switch between automatic and numerical differentiation for the PDE operators. 
The reverse mode of the loss function is always automatic differentiation (via Zygote), this is only for the derivative @@ -274,7 +281,6 @@ function generate_loss(strategy::GridTraining, phi, f, autodiff::Bool, tspan, p, sum(abs2, [inner_loss(phi, f, autodiff, t, θ, p) for t in ts]) end end - return loss end @@ -290,7 +296,6 @@ function generate_loss(strategy::StochasticTraining, phi, f, autodiff::Bool, tsp sum(abs2, [inner_loss(phi, f, autodiff, t, θ, p) for t in ts]) end end - return loss end @@ -418,12 +423,12 @@ function DiffEqBase.__solve(prob::DiffEqBase.AbstractODEProblem, alg.batch end - # additional loss + inner_f = generate_loss(strategy, phi, f, autodiff, tspan, p, batch) additional_loss = alg.additional_loss # Creates OptimizationFunction Object from total_loss function total_loss(θ, _) - L2_loss = generate_loss(strategy, phi, f, autodiff, tspan, p, batch)(θ, phi) + L2_loss = inner_f(θ, phi) if !(additional_loss isa Nothing) return additional_loss(phi, θ) + L2_loss end @@ -433,14 +438,7 @@ function DiffEqBase.__solve(prob::DiffEqBase.AbstractODEProblem, # Choice of Optimization Algo for Training Strategies opt_algo = if strategy isa QuadratureTraining Optimization.AutoForwardDiff() - elseif strategy isa StochasticTraining - Optimization.AutoZygote() - elseif strategy isa WeightedIntervalTraining - Optimization.AutoZygote() else - # by default GridTraining choice of Optimization - # if adding new training algorithms we can extend this, - # if-elseif-else block for choices of optimization algos Optimization.AutoZygote() end From 9495d96715a8567359d8559a0bf758b1a5b52d57 Mon Sep 17 00:00:00 2001 From: Christopher Rackauckas Date: Tue, 4 Apr 2023 10:51:49 -0400 Subject: [PATCH 53/53] Update src/ode_solve.jl --- src/ode_solve.jl | 1 - 1 file changed, 1 deletion(-) diff --git a/src/ode_solve.jl b/src/ode_solve.jl index 1c3d7afb3..84e6daf29 100644 --- a/src/ode_solve.jl +++ b/src/ode_solve.jl @@ -327,7 +327,6 @@ function generate_loss(strategy::WeightedIntervalTraining, phi, f, autodiff::Boo sum(abs2, [inner_loss(phi, f, autodiff, t, θ, p) for t in ts]) end end - return loss end
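     return loss
 end

Taken together, the end state of this series: each generate_loss method returns a plain loss closure, __solve composes it with any user-supplied additional_loss into one total_loss, wraps that in a single OptimizationFunction, and picks the AD backend once (AutoForwardDiff for QuadratureTraining, AutoZygote otherwise). A usage sketch mirroring the WeightedIntervalTraining test added earlier in the series; the problem, chain, and hyperparameters are the test's illustrative choices:

```julia
using NeuralPDE, Lux, Optimisers, OrdinaryDiffEq

# Lotka-Volterra system, as in the test suite
f(u, p, t) = [p[1] * u[1] - p[2] * u[1] * u[2], -p[3] * u[2] + p[4] * u[1] * u[2]]
p = [1.5, 1.0, 3.0, 1.0]
u0 = [1.0, 1.0]
prob_oop = ODEProblem{false}(f, u0, (0.0, 3.0), p)

N = 12
func = Lux.σ
chain = Lux.Chain(Lux.Dense(1, N, func), Lux.Dense(N, N, func), Lux.Dense(N, N, func),
                  Lux.Dense(N, N, func), Lux.Dense(N, length(u0)))

opt = Optimisers.Adam(0.01)
weights = [0.7, 0.2, 0.1]   # relative sampling weight for each equal sub-interval of tspan
samples = 200               # total number of sampled collocation points
alg = NeuralPDE.NNODE(chain, opt, autodiff = false,
                      strategy = NeuralPDE.WeightedIntervalTraining(weights, samples))
sol = solve(prob_oop, alg, verbose = true, maxiters = 100000, saveat = 0.01)
```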