
Commit a2a4836

Run JuliaFormatter.format()
Using JuliaFormatter v1.0.47
1 parent 46d9f10 commit a2a4836
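
A run like this is reproducible with a single formatter call. A minimal sketch, assuming JuliaFormatter v1.0.47 is installed and that the repository's .JuliaFormatter.toml carries the style settings (SciML repositories typically pin the style there):

```julia
using JuliaFormatter

# Format every .jl file under the current directory, in place.
# Style options (e.g. the SciML style) are read from the repo's
# .JuliaFormatter.toml when present; `format` returns true if all
# files were already correctly formatted.
format(".")
```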

32 files changed: +263 −243 lines
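
Every hunk below applies the same mechanical change: continuation lines formerly aligned under the opening parenthesis are re-indented to SciML-style block indentation (four spaces for wrapped call arguments, eight for wrapped function signatures). A representative before/after from docs/src/getting_started.md, with illustrative indentation widths:

```julia
# Before: continuation arguments aligned under the opening parenthesis
cost_function = build_loss_objective(prob, Tsit5(), L2Loss(t, data),
                                     Optimization.AutoForwardDiff(),
                                     maxiters = 10000, verbose = false)

# After: fixed four-space continuation indent
cost_function = build_loss_objective(prob, Tsit5(), L2Loss(t, data),
    Optimization.AutoForwardDiff(),
    maxiters = 10000, verbose = false)
```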

docs/make.jl

Lines changed: 7 additions & 7 deletions
@@ -8,12 +8,12 @@ ENV["GKSwstype"] = "100"
 include("pages.jl")
 
 makedocs(sitename = "DiffEqParamEstim.jl",
-         authors = "Chris Rackauckas et al.",
-         modules = [DiffEqParamEstim],
-         clean = true, doctest = false, linkcheck = true,
-         format = Documenter.HTML(assets = ["assets/favicon.ico"],
-                                  canonical = "https://docs.sciml.ai/DiffEqParamEstim/stable/"),
-         pages = pages)
+    authors = "Chris Rackauckas et al.",
+    modules = [DiffEqParamEstim],
+    clean = true, doctest = false, linkcheck = true,
+    format = Documenter.HTML(assets = ["assets/favicon.ico"],
+        canonical = "https://docs.sciml.ai/DiffEqParamEstim/stable/"),
+    pages = pages)
 
 deploydocs(repo = "github.com/SciML/DiffEqParamEstim.jl";
-           push_preview = true)
+    push_preview = true)

docs/pages.jl

Lines changed: 5 additions & 5 deletions
@@ -1,10 +1,10 @@
 pages = ["index.md",
     "getting_started.md",
     "Tutorials" => Any["tutorials/global_optimization.md",
-                       "tutorials/generalized_likelihood.md",
-                       "tutorials/stochastic_evaluations.md",
-                       "tutorials/ensemble.md"],
+        "tutorials/generalized_likelihood.md",
+        "tutorials/stochastic_evaluations.md",
+        "tutorials/ensemble.md"],
     "Methods" => Any["methods/recommended_methods.md",
-                     "methods/optimization_based_methods.md",
-                     "methods/collocation_loss.md"],
+        "methods/optimization_based_methods.md",
+        "methods/collocation_loss.md"]
 ]

docs/src/getting_started.md

Lines changed: 17 additions & 17 deletions
@@ -77,8 +77,8 @@ function:
 
 ```@example ode
 cost_function = build_loss_objective(prob, Tsit5(), L2Loss(t, data),
-                                     Optimization.AutoForwardDiff(),
-                                     maxiters = 10000, verbose = false)
+    Optimization.AutoForwardDiff(),
+    maxiters = 10000, verbose = false)
 ```
 
 This objective function internally is calling the ODE solver to get solutions
@@ -103,8 +103,8 @@ of parameter values:
 ```@example ode
 vals = 0.0:0.1:10.0
 plot(vals, [cost_function(i) for i in vals], yscale = :log10,
-     xaxis = "Parameter", yaxis = "Cost", title = "1-Parameter Cost Function",
-     lw = 3)
+    xaxis = "Parameter", yaxis = "Cost", title = "1-Parameter Cost Function",
+    lw = 3)
 ```
 
 Here we see that there is a very well-defined minimum in our cost function at
@@ -168,8 +168,8 @@ We can build an objective function and solve the multiple parameter version just
 
 ```@example ode
 cost_function = build_loss_objective(prob, Tsit5(), L2Loss(t, data),
-                                     Optimization.AutoForwardDiff(),
-                                     maxiters = 10000, verbose = false)
+    Optimization.AutoForwardDiff(),
+    maxiters = 10000, verbose = false)
 optprob = Optimization.OptimizationProblem(cost_function, [1.3, 0.8, 2.8, 1.2])
 result_bfgs = solve(optprob, BFGS())
 ```
@@ -184,10 +184,10 @@ differencing loss to the total loss.
 
 ```@example ode
 cost_function = build_loss_objective(prob, Tsit5(),
-                                     L2Loss(t, data, differ_weight = 0.3,
-                                            data_weight = 0.7),
-                                     Optimization.AutoForwardDiff(),
-                                     maxiters = 10000, verbose = false)
+    L2Loss(t, data, differ_weight = 0.3,
+        data_weight = 0.7),
+    Optimization.AutoForwardDiff(),
+    maxiters = 10000, verbose = false)
 optprob = Optimization.OptimizationProblem(cost_function, [1.3, 0.8, 2.8, 1.2])
 result_bfgs = solve(optprob, BFGS())
 ```
@@ -206,14 +206,14 @@ ms_prob = ODEProblem(ms_f1, ms_u0, tspan, ms_p)
 t = collect(range(0, stop = 10, length = 200))
 data = Array(solve(ms_prob, Tsit5(), saveat = t, abstol = 1e-12, reltol = 1e-12))
 bound = Tuple{Float64, Float64}[(0, 10), (0, 10), (0, 10), (0, 10),
-                                (0, 10), (0, 10), (0, 10), (0, 10),
-                                (0, 10), (0, 10), (0, 10), (0, 10),
-                                (0, 10), (0, 10), (0, 10), (0, 10), (0, 10), (0, 10)]
+    (0, 10), (0, 10), (0, 10), (0, 10),
+    (0, 10), (0, 10), (0, 10), (0, 10),
+    (0, 10), (0, 10), (0, 10), (0, 10), (0, 10), (0, 10)]
 
 ms_obj = multiple_shooting_objective(ms_prob, Tsit5(), L2Loss(t, data),
-                                     Optimization.AutoForwardDiff();
-                                     discontinuity_weight = 1.0, abstol = 1e-12,
-                                     reltol = 1e-12)
+    Optimization.AutoForwardDiff();
+    discontinuity_weight = 1.0, abstol = 1e-12,
+    reltol = 1e-12)
 ```
 
 This creates the objective function that can be passed to an optimizer, from which we can then get the parameter values
@@ -222,7 +222,7 @@ a global optimization method to improve robustness even more:
 
 ```@example ode
 optprob = Optimization.OptimizationProblem(ms_obj, zeros(18), lb = first.(bound),
-                                           ub = last.(bound))
+    ub = last.(bound))
 optsol_ms = solve(optprob, BBO_adaptive_de_rand_1_bin_radiuslimited(), maxiters = 10_000)
 ```
 

docs/src/methods/collocation_loss.md

Lines changed: 2 additions & 2 deletions
@@ -11,7 +11,7 @@ but is much faster, and is a good method to try first to get in the general
 
 ```julia
 function two_stage_objective(prob::DEProblem, tpoints, data, adtype = SciMLBase.NoAD(), ;
-                             kernel = :Epanechnikov,
-                             loss_func = L2DistLoss)
+        kernel = :Epanechnikov,
+        loss_func = L2DistLoss)
 end
 ```

docs/src/methods/optimization_based_methods.md

Lines changed: 14 additions & 14 deletions
@@ -9,11 +9,11 @@ and MathProgBase-associated solvers like NLopt.
 
 ```julia
 function build_loss_objective(prob::DEProblem, alg, loss,
-                              adtype = SciMLBase.NoAD(),
-                              regularization = nothing;
-                              priors = nothing,
-                              prob_generator = STANDARD_PROB_GENERATOR,
-                              kwargs...)
+        adtype = SciMLBase.NoAD(),
+        regularization = nothing;
+        priors = nothing,
+        prob_generator = STANDARD_PROB_GENERATOR,
+        kwargs...)
 end
 ```
 
@@ -38,12 +38,12 @@ proceeds as follows:
 
 ```julia
 function multiple_shooting_objective(prob::DiffEqBase.DEProblem, alg, loss,
-                                     adtype = SciMLBase.NoAD(),
-                                     regularization = nothing;
-                                     priors = nothing,
-                                     discontinuity_weight = 1.0,
-                                     prob_generator = STANDARD_PROB_GENERATOR,
-                                     kwargs...)
+        adtype = SciMLBase.NoAD(),
+        regularization = nothing;
+        priors = nothing,
+        discontinuity_weight = 1.0,
+        prob_generator = STANDARD_PROB_GENERATOR,
+        kwargs...)
 end
 ```
 
@@ -67,7 +67,7 @@ cost functions:
 
 ```julia
 L2Loss(t, data; differ_weight = nothing, data_weight = nothing,
-       colloc_grad = nothing, dudt = nothing)
+    colloc_grad = nothing, dudt = nothing)
 ```
 
 where `t` is the set of timepoints which the data are found at, and
@@ -213,6 +213,6 @@ the parameters or a multivariate distribution.
 
 ```julia
 ms_obj = multiple_shooting_objective(ms_prob, Tsit5(), L2Loss(t, data); priors = priors,
-                                     discontinuity_weight = 1.0, abstol = 1e-12,
-                                     reltol = 1e-12)
+    discontinuity_weight = 1.0, abstol = 1e-12,
+    reltol = 1e-12)
 ```

docs/src/tutorials/ensemble.md

Lines changed: 6 additions & 6 deletions
@@ -44,7 +44,7 @@ initial_conditions = [
     [0.5, 0.5],
     [2.0, 1.0],
     [1.0, 2.0],
-    [2.0, 2.0],
+    [2.0, 2.0]
 ]
 function prob_func(prob, i, repeat)
     ODEProblem(prob.f, initial_conditions[i], prob.tspan, prob.p)
@@ -111,8 +111,8 @@ Put this into build_loss_objective.
 
 ```@example ensemble
 obj = build_loss_objective(enprob, Tsit5(), loss, Optimization.AutoForwardDiff(),
-                           trajectories = N,
-                           saveat = data_times)
+    trajectories = N,
+    saveat = data_times)
 ```
 
 Notice that we added the kwargs for `solve` of the `EnsembleProblem` into this. They get passed to the internal `solve`
@@ -141,9 +141,9 @@ to decrease the tolerance of the ODE solvers via
 
 ```@example ensemble
 obj = build_loss_objective(enprob, Tsit5(), loss, Optimization.AutoForwardDiff(),
-                           trajectories = N,
-                           abstol = 1e-8, reltol = 1e-8,
-                           saveat = data_times)
+    trajectories = N,
+    abstol = 1e-8, reltol = 1e-8,
+    saveat = data_times)
 optprob = OptimizationProblem(obj, [1.3, 0.9], lb = lower, ub = upper)
 result = solve(optprob, BFGS()) #OptimizationOptimJL detects that it's a box constrained problem and use Fminbox wrapper over BFGS
 ```

docs/src/tutorials/generalized_likelihood.md

Lines changed: 5 additions & 5 deletions
@@ -69,7 +69,7 @@ corresponding to that distribution fit:
 
 ```@example likelihood
 obj = build_loss_objective(prob1, Tsit5(), LogLikeLoss(t, distributions),
-                           maxiters = 10000, verbose = false)
+    maxiters = 10000, verbose = false)
 ```
 
 First, let's use the objective function to plot the likelihood landscape:
@@ -79,8 +79,8 @@ using Plots;
 plotly();
 prange = 0.5:0.1:5.0
 heatmap(prange, prange, [obj([j, i]) for i in prange, j in prange],
-        yscale = :log10, xlabel = "Parameter 1", ylabel = "Parameter 2",
-        title = "Likelihood Landscape")
+    yscale = :log10, xlabel = "Parameter 1", ylabel = "Parameter 2",
+    title = "Likelihood Landscape")
 ```
 
 ![2 Parameter Likelihood](../assets/2paramlike.png)
@@ -92,8 +92,8 @@ one-dimensional slice:
 
 ```julia
 plot(prange, [obj([1.5, i]) for i in prange], lw = 3,
-     title = "Parameter 2 Likelihood (Parameter 1 = 1.5)",
-     xlabel = "Parameter 2", ylabel = "Objective Function Value")
+    title = "Parameter 2 Likelihood (Parameter 1 = 1.5)",
+    xlabel = "Parameter 2", ylabel = "Objective Function Value")
 ```
 
 ![1 Parameter Likelihood](../assets/1paramlike.png)

docs/src/tutorials/global_optimization.md

Lines changed: 4 additions & 4 deletions
@@ -56,10 +56,10 @@ We can even use things like the Improved Stochastic Ranking Evolution Strategy
 ```@example global_optimization
 optprob = Optimization.OptimizationProblem(obj, [0.2], lb = [-1.0], ub = [5.0])
 res = solve(optprob,
-            OptimizationMOI.MOI.OptimizerWithAttributes(NLopt.Optimizer,
-                                                        "algorithm" => :GN_ISRES,
-                                                        "xtol_rel" => 1e-3,
-                                                        "maxeval" => 10000))
+    OptimizationMOI.MOI.OptimizerWithAttributes(NLopt.Optimizer,
+        "algorithm" => :GN_ISRES,
+        "xtol_rel" => 1e-3,
+        "maxeval" => 10000))
 ```
 
 which is very robust to the initial condition. We can also directly use the NLopt interface as below. The fastest result comes from the

docs/src/tutorials/stochastic_evaluations.md

Lines changed: 5 additions & 5 deletions
@@ -52,8 +52,8 @@ We use Optim.jl for optimization below
 
 ```@example sde
 obj = build_loss_objective(monte_prob, SOSRI(), L2Loss(t, aggregate_data),
-                           Optimization.AutoForwardDiff(),
-                           maxiters = 10000, verbose = false, trajectories = 1000)
+    Optimization.AutoForwardDiff(),
+    maxiters = 10000, verbose = false, trajectories = 1000)
 optprob = Optimization.OptimizationProblem(obj, [1.0, 0.5])
 result = solve(optprob, Optim.BFGS())
 ```
@@ -68,9 +68,9 @@ Instead, when we use `L2Loss` with first differencing enabled, we get much more
 
 ```@example sde
 obj = build_loss_objective(monte_prob, SRIW1(),
-                           L2Loss(t, aggregate_data, differ_weight = 1.0,
-                                  data_weight = 0.5), Optimization.AutoForwardDiff(),
-                           verbose = false, trajectories = 1000, maxiters = 1000)
+    L2Loss(t, aggregate_data, differ_weight = 1.0,
+        data_weight = 0.5), Optimization.AutoForwardDiff(),
+    verbose = false, trajectories = 1000, maxiters = 1000)
 optprob = Optimization.OptimizationProblem(obj, [1.0, 0.5])
 result = solve(optprob, Optim.BFGS())
 result.original

src/DiffEqParamEstim.jl

Lines changed: 5 additions & 5 deletions
@@ -7,18 +7,18 @@ import PreallocationTools
 STANDARD_PROB_GENERATOR(prob, p) = remake(prob; u0 = eltype(p).(prob.u0), p = p)
 function STANDARD_PROB_GENERATOR(prob::EnsembleProblem, p)
     EnsembleProblem(remake(prob.prob; u0 = eltype(p).(prob.prob.u0), p = p),
-                    output_func = prob.output_func,
-                    prob_func = prob.prob_func,
-                    reduction = prob.reduction,
-                    u_init = prob.u_init)
+        output_func = prob.output_func,
+        prob_func = prob.prob_func,
+        reduction = prob.reduction,
+        u_init = prob.u_init)
 end
 STANDARD_MS_PROB_GENERATOR = function (prob, p, k)
     t0, tf = prob.tspan
     P, N = length(prob.p), length(prob.u0)
     K = Int((length(p) - P) / N)
     τ = range(t0, tf, length = K + 1)
     remake(prob; u0 = p[(1 + (k - 1) * N):(k * N)], p = p[(end - P + 1):end],
-           tspan = (τ[k], τ[k + 1]))
+        tspan = (τ[k], τ[k + 1]))
 end
 
 include("cost_functions.jl")

src/build_loss_objective.jl

Lines changed: 6 additions & 6 deletions
@@ -1,16 +1,16 @@
 export build_loss_objective
 
 function build_loss_objective(prob::SciMLBase.AbstractSciMLProblem, alg, loss,
-                              adtype = SciMLBase.NoAD(),
-                              regularization = nothing, args...;
-                              priors = nothing,
-                              prob_generator = STANDARD_PROB_GENERATOR,
-                              kwargs...)
+        adtype = SciMLBase.NoAD(),
+        regularization = nothing, args...;
+        priors = nothing,
+        prob_generator = STANDARD_PROB_GENERATOR,
+        kwargs...)
     cost_function = function (p, _ = nothing)
         tmp_prob = prob_generator(prob, p)
        if loss isa Union{L2Loss, LogLikeLoss}
             sol = solve(tmp_prob, alg, args...; saveat = loss.t, save_everystep = false,
-                        dense = false, kwargs...)
+                dense = false, kwargs...)
         else
             sol = solve(tmp_prob, alg, args...; kwargs...)
         end

src/cost_functions.jl

Lines changed: 12 additions & 8 deletions
@@ -89,10 +89,12 @@ function (f::L2Loss)(sol::SciMLBase.AbstractSciMLSolution)
         for j in 1:length(sol[i])
             if diff_weight isa Real
                 sumsq += diff_weight *
-                         ((data[j, i] - data[j, i - 1] - sol[j, i] + sol[j, i - 1])^2)
+                         ((data[j, i] - data[j, i - 1] - sol[j, i] +
+                           sol[j, i - 1])^2)
             else
                 sumsq += diff_weight[j, i] *
-                         ((data[j, i] - data[j, i - 1] - sol[j, i] + sol[j, i - 1])^2)
+                         ((data[j, i] - data[j, i - 1] - sol[j, i] +
+                           sol[j, i - 1])^2)
             end
         end
     end
@@ -112,10 +114,12 @@ function (f::L2Loss)(sol::SciMLBase.AbstractSciMLSolution)
         for j in 1:length(sol[i])
             if diff_weight isa Real
                 sumsq += diff_weight *
-                         ((data[j, i] - data[j, i - 1] - sol[j, i] + sol[j, i - 1])^2)
+                         ((data[j, i] - data[j, i - 1] - sol[j, i] +
+                           sol[j, i - 1])^2)
             else
                 sumsq += diff_weight[j, i] *
-                         ((data[j, i] - data[j, i - 1] - sol[j, i] + sol[j, i - 1])^2)
+                         ((data[j, i] - data[j, i - 1] - sol[j, i] +
+                           sol[j, i - 1])^2)
             end
         end
     end
@@ -135,11 +139,11 @@ end
 matrixize(x) = x isa Vector ? reshape(x, 1, length(x)) : x
 
 function L2Loss(t, data; differ_weight = nothing, data_weight = nothing,
-                colloc_grad = nothing,
-                dudt = nothing)
+        colloc_grad = nothing,
+        dudt = nothing)
     L2Loss(t, matrixize(data), matrixize(differ_weight),
-           matrixize(data_weight), matrixize(colloc_grad),
-           colloc_grad == nothing ? nothing : zeros(size(colloc_grad)))
+        matrixize(data_weight), matrixize(colloc_grad),
+        colloc_grad == nothing ? nothing : zeros(size(colloc_grad)))
 end
 
 function (f::L2Loss)(sol::DiffEqBase.AbstractEnsembleSolution)
