Commit 3f1df12
Merge pull request #73 from ArnoStrouwen/CI
Revamp CI
ChrisRackauckas authored Jan 22, 2024
2 parents 64565f8 + dba411d commit 3f1df12
Showing 9 changed files with 140 additions and 67 deletions.
32 changes: 19 additions & 13 deletions .github/workflows/CI.yml
@@ -10,28 +10,34 @@ on:
- main
paths-ignore:
- 'docs/**'
schedule:
- cron: '41 0 * * 5'
jobs:
test:
runs-on: ubuntu-latest
runs-on: ${{ matrix.os }}
strategy:
fail-fast: false
matrix:
group:
- Core
version:
- '1'
os:
- ubuntu-latest
- macos-latest
- windows-latest
steps:
- uses: actions/checkout@v4
- uses: julia-actions/setup-julia@v1
with:
version: 1
- uses: actions/cache@v4
env:
cache-name: cache-artifacts
version: ${{ matrix.version }}
- uses: julia-actions/cache@v1
with:
path: ~/.julia/artifacts
key: ${{ runner.os }}-test-${{ env.cache-name }}-${{ hashFiles('**/Project.toml') }}
restore-keys: |
${{ runner.os }}-test-${{ env.cache-name }}-
${{ runner.os }}-test-
${{ runner.os }}-
token: ${{ secrets.GITHUB_TOKEN }}
- uses: julia-actions/julia-buildpkg@v1
- uses: julia-actions/julia-runtest@v1
env:
GROUP: ${{ matrix.group }}
with:
depwarn: error
- uses: julia-actions/julia-processcoverage@v1
- uses: codecov/codecov-action@v3
with:
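For context, the `GROUP` variable exported to `julia-runtest` selects which test group runs. A minimal sketch of how `test/runtests.jl` typically consumes it in SciML packages (an assumption for illustration, not part of this diff):

```julia
# Hypothetical test/runtests.jl dispatch on the GROUP variable set in CI.yml.
using SafeTestsets

const GROUP = get(ENV, "GROUP", "All")  # defaults to running everything locally

if GROUP == "All" || GROUP == "Core"
    @safetestset "Quality Assurance" begin include("qa.jl") end
    @safetestset "DeepSplitting" begin include("DeepSplitting.jl") end
end
```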
41 changes: 41 additions & 0 deletions .github/workflows/Downgrade.yml
@@ -0,0 +1,41 @@
name: Downgrade
on:
pull_request:
branches:
- main
paths-ignore:
- 'docs/**'
push:
branches:
- main
paths-ignore:
- 'docs/**'
schedule:
- cron: '41 0 * * 5'
jobs:
test:
runs-on: ${{ matrix.os }}
strategy:
fail-fast: false
matrix:
group:
- Core
version:
- '1'
os:
- ubuntu-latest
- macos-latest
- windows-latest
steps:
- uses: actions/checkout@v4
- uses: julia-actions/setup-julia@v1
with:
version: ${{ matrix.version }}
- uses: cjdoris/julia-downgrade-compat-action@v1
with:
skip: Pkg,TOML
- uses: julia-actions/cache@v1
with:
token: ${{ secrets.GITHUB_TOKEN }}
- uses: julia-actions/julia-buildpkg@v1
- uses: julia-actions/julia-runtest@v1
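The downgrade job uses `cjdoris/julia-downgrade-compat-action` to rewrite each `[compat]` lower bound into an exact pin, so the test suite runs against the oldest versions the package claims to support, catching overly optimistic bounds. Conceptually it does something like this rough sketch (not the action's actual code; the skip list mirrors the `skip: Pkg,TOML` input above):

```julia
# Conceptual sketch of downgrade testing: pin every [compat] entry to its
# stated lower bound before resolving and running the tests.
using TOML

project = TOML.parsefile("Project.toml")
for (pkg, spec) in collect(get(project, "compat", Dict{String,Any}()))
    pkg in ("julia", "Pkg", "TOML") && continue      # configured skips
    lower = strip(first(split(spec, ",")))           # lower bound of first range
    project["compat"][pkg] = "=" * lstrip(lower, ['^', '~'])  # exact pin
end
open(io -> TOML.print(io, project), "Project.toml", "w")
```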
5 changes: 3 additions & 2 deletions .github/workflows/documentation.yml
@@ -6,15 +6,16 @@ on:
- main
tags: '*'
pull_request:

schedule:
- cron: '41 0 * * 5'
jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: julia-actions/setup-julia@latest
with:
version: '1.10'
version: '1'
- name: Install dependencies
run: julia --project=docs/ -e 'using Pkg; Pkg.develop(PackageSpec(path=pwd())); Pkg.instantiate()'
- name: Build and deploy
37 changes: 26 additions & 11 deletions Project.toml
@@ -7,7 +7,6 @@ version = "1.2.1"
CUDA = "052768ef-5323-5732-b1bb-66c8b64840ba"
DiffEqBase = "2b5f629d-d688-5b77-993f-72d75c75574e"
DocStringExtensions = "ffbed154-4ef7-542d-bbb7-c09d3a79fcae"
ExprTools = "e2ba6199-217a-4e67-a87a-7c52f15ade04"
Flux = "587475ba-b771-5e3f-ad9e-33799f191a9c"
Functors = "d9f16b24-f501-4c13-a1f2-28368ffc5196"
LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
@@ -18,19 +17,35 @@ SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
StochasticDiffEq = "789caeaf-c7a9-5a7d-9973-96adeb23e2a0"
Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
TestEnv = "1e6cf692-eddd-4d53-88a5-2d735e33781b"
Tracker = "9f7883ad-71c0-57eb-9f7f-b5c9e6d3789c"
Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f"
cuDNN = "02a925ec-e4fe-4b08-9a7e-0d78e3d38ccd"

[compat]
CUDA = "3, 4, 5"
DiffEqBase = "6"
DocStringExtensions = "0.8, 0.9"
ExprTools = "0.1"
Flux = "0.13, 0.14"
Functors = "0.2, 0.3, 0.4"
Aqua = "0.8"
CUDA = "4.4, 5"
DiffEqBase = "6.137"
DocStringExtensions = "0.9"
Flux = "0.13.12, 0.14"
Functors = "0.4"
LinearAlgebra = "1.10"
Random = "1.10"
Reexport = "1"
Statistics = "1"
Zygote = "0.6"
julia = "1.8, 1.9"
SafeTestsets = "0.1"
SciMLSensitivity = "7.49"
SparseArrays = "1.10"
Statistics = "1.10"
StochasticDiffEq = "6.63"
Test = "1.10"
Tracker = "0.2.18"
Zygote = "0.6.61"
cuDNN = "1.1"
julia = "1.10"

[extras]
Aqua = "4c88cf16-eb10-579e-8560-4a9242c79595"
SafeTestsets = "1bc83da4-3b8d-516f-aca4-4fe02f6d838f"
Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"

[targets]
test = ["Aqua", "Test", "SafeTestsets"]
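Two things worth noting in the tightened compat section: entries now pin concrete lower bounds (e.g. `Flux = "0.13.12, 0.14"` admits `0.13.12 ≤ v < 0.15` under Pkg's default caret semantics), and `julia = "1.10"` drops Julia 1.8/1.9 support, matching the `1.10`-series stdlib bounds above. Bounds like these can also be set programmatically; for illustration (available since Julia 1.8):

```julia
# Illustrative only: Pkg.compat edits [compat] entries from a script or REPL.
using Pkg
Pkg.activate(".")                      # environment of the package being edited
Pkg.compat("Flux", "0.13.12, 0.14")    # 0.13.12 ≤ v < 0.15
Pkg.compat("julia", "1.10")
```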
10 changes: 5 additions & 5 deletions src/DeepBSDE.jl
@@ -8,7 +8,7 @@ DeepBSDE(u0,σᵀ∇u;opt=Flux.Optimise.Adam(0.1))
## Arguments
- `u0`: a Flux.jl `Chain` with a d-dimensional input and a 1-dimensional output for the solution guess.
- `σᵀ∇u`: a Flux.jl `Chain` for the BSDE value guess.
- `opt`: the optimization algorithm to be used to optimize the neural networks. Defaults to `ADAM(0.1)`.
- `opt`: the optimization algorithm to be used to optimize the neural networks. Defaults to `Flux.Optimise.Adam(0.1)`.
## Example
Black-Scholes-Barenblatt equation
@@ -28,7 +28,7 @@ g(X) = sum(X.^2)
σ_f(X,p,t) = Diagonal(sigma*X) #Matrix d x d
prob = PIDEProblem(g, f, μ_f, σ_f, x0, tspan)
hls = 10 + d #hiden layer size
hls = 10 + d #hidden layer size
opt = Flux.Optimise.Adam(0.001)
u0 = Flux.Chain(Dense(d,hls,relu),
Dense(hls,hls,relu),
@@ -188,7 +188,7 @@ function DiffEqBase.solve(prob::PIDEProblem,
Flux.train!(loss_n_sde, ps, data, opt; cb = cb)

if !limits
# Returning iters or simply u0(x0) and the tained neural network approximation u0
# Returning iters or simply u0(x0) and the trained neural network approximation u0
if save_everystep
sol = PIDESolution(x0, tspan[1]:dt:tspan[2], losses, iters, re1(p3))
else
@@ -254,8 +254,8 @@ function DiffEqBase.solve(prob::PIDEProblem,
true && println("Current loss is: $l")
l < 1e-6 && Flux.stop()
end
dataS = Iterators.repeated((), maxiters_limits)
Flux.train!(loss_, ps, dataS, ADAM(0.01); cb = cb)
dataS = Iterators.repeated((), maxiters_upper)
Flux.train!(loss_, ps, dataS, Flux.Optimise.Adam(0.01); cb = cb)
u_high = loss_()

verbose && println("Lower limit")
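As an aside, the `Iterators.repeated((), maxiters_upper)` idiom is the standard implicit-parameters Flux pattern for running a fixed number of optimisation steps: each empty tuple makes `Flux.train!` call the zero-argument loss once more. A self-contained example of the pattern (general Flux ≤ 0.14 usage, not code from this PR):

```julia
using Flux

W = rand(Float32, 2, 2)
loss() = sum(abs2, W)                  # toy zero-argument objective
ps = Flux.params(W)                    # implicit parameters
data = Iterators.repeated((), 100)     # () repeated 100 times ⇒ 100 steps
Flux.train!(loss, ps, data, Flux.Optimise.Adam(0.01))
```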
15 changes: 8 additions & 7 deletions src/DeepSplitting.jl
@@ -1,10 +1,11 @@
Base.copy(t::Tuple) = t # required for below
function Base.copy(opt::O) where {O <: Flux.Optimise.AbstractOptimiser}
return O([copy(getfield(opt, f)) for f in fieldnames(typeof(opt))]...)
_copy(t::Tuple) = t
_copy(t) = t
function _copy(opt::O) where O<:Flux.Optimise.AbstractOptimiser
return O([_copy(getfield(opt,f)) for f in fieldnames(typeof(opt))]...)
end
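This replaces the earlier `Base.copy(t::Tuple)` definition, which was type piracy on a `Base` type (something the new Aqua checks below would flag), with an internal `_copy` that still gives each training stage a fresh optimiser. A small illustration of the intent (assumes access to the internal `_copy`; not part of the diff):

```julia
using Flux

opt = Flux.Optimise.Adam(0.01)
opt_net = _copy(opt)        # independent optimiser struct
opt_net.eta = 5e-3          # retune the copy's learning rate...
@assert opt.eta == 0.01     # ...without mutating the original
```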

"""
DeepSplitting(nn, K=1, opt = ADAM(0.01), λs = nothing, mc_sample = NoSampling())
DeepSplitting(nn, K=1, opt = Flux.Optimise.Adam(0.01), λs = nothing, mc_sample = NoSampling())
Deep splitting algorithm.
@@ -25,7 +26,7 @@ nn = Flux.Chain(Dense(d, hls, tanh),
Dense(hls,hls,tanh),
Dense(hls, 1, x->x^2))
alg = DeepSplitting(nn, K=10, opt = ADAM(), λs = [5e-3,1e-3],
alg = DeepSplitting(nn, K=10, opt = Flux.Optimise.Adam(), λs = [5e-3,1e-3],
mc_sample = UniformSampling(zeros(d), ones(d)) )
```
"""
Expand All @@ -39,7 +40,7 @@ end

function DeepSplitting(nn;
K = 1,
opt::O = ADAM(0.01),
opt::O = Flux.Optimise.Adam(0.01),
λs::L = nothing,
mc_sample = NoSampling()) where {
O <: Flux.Optimise.AbstractOptimiser,
@@ -167,7 +168,7 @@ function DiffEqBase.solve(prob::PIDEProblem,
_maxiters = length(maxiters) > 1 ? maxiters[min(net, 2)] : maxiters[]

for λ in λs
opt_net = copy(opt) # starting with a new optimiser state at each time step
opt_net = _copy(opt) # starting with a new optimiser state at each time step
opt_net.eta = λ
verbose &&
println("Training started with ", typeof(opt_net), " and λ :", opt_net.eta)
40 changes: 20 additions & 20 deletions test/DeepSplitting.jl
@@ -44,7 +44,7 @@ end
Dense(hls, hls, relu),
Dense(hls, 1)) # Neural network used by the scheme

opt = ADAM(0.01) #optimiser
opt = Flux.Optimise.Adam(0.01) #optimiser
alg = DeepSplitting(nn, opt = opt)

f(y, z, v_y, v_z, ∇v_y, ∇v_z, p, t) = 0.0f0 .* v_y
@@ -88,7 +88,7 @@ end
Dense(hls, hls, relu),
Dense(hls, 1)) # Neural network used by the scheme

opt = ADAM(0.01) #optimiser
opt = Flux.Optimise.Adam(0.01) #optimiser
alg = DeepSplitting(nn, opt = opt)

f(y, z, v_y, v_z, ∇v_y, ∇v_z, p, t) = 0.0f0 .* v_y #TODO: this fix is not nice
@@ -107,7 +107,7 @@ end
u1_anal = [u_anal(x, tspan[end]) for x in eachcol(xs)]
e_l2 = mean(rel_error_l2.(u1, u1_anal))
println("rel_error_l2 = ", e_l2, "\n")
@test e_l2 < 0.13
@test e_l2 < 0.185
end
end
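For reference, `rel_error_l2` is a helper defined elsewhere in the test suite; a plausible scalar form consistent with how it is broadcast above (an assumption, not the actual definition):

```julia
rel_error_l2(u, u_ref) = sqrt((u - u_ref)^2 / u_ref^2)  # pointwise relative error
```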

@@ -134,7 +134,7 @@ end
Dense(hls, hls, relu),
Dense(hls, 1)) # Neural network used by the scheme

opt = ADAM(0.01) #optimiser
opt = Flux.Optimise.Adam(0.01) #optimiser
alg = DeepSplitting(nn, opt = opt)

f(y, z, v_y, v_z, ∇v_y, ∇v_z, p, t) = 0.0f0 .* v_y #TODO: this fix is not nice
@@ -192,7 +192,7 @@ end
Dense(hls, hls, relu),
Dense(hls, 1)) # Neural network used by the scheme

opt = ADAM(0.01) #optimiser
opt = Flux.Optimise.Adam(0.01) #optimiser
alg = DeepSplitting(nn, opt = opt)

f(y, z, v_y, v_z, ∇v_y, ∇v_z, p, t) = r * v_y #TODO: this fix is not nice
@@ -234,8 +234,8 @@ end
Dense(hls, hls, tanh),
Dense(hls, 1)) # Neural network used by the scheme

opt = ADAM(1e-3) #optimiser
alg = DeepSplitting(nn, opt = opt)
opt = Flux.Optimise.Adam(1e-3) #optimiser
alg = DeepSplitting(nn, opt = opt )

X0 = fill(0.0f0, d) # initial point
g(X) = 1.0f0 ./ (2.0f0 .+ 4.0f-1 * sum(X .^ 2, dims = 1)) # initial condition
@@ -257,7 +257,7 @@ end
u1 = sol.us[end]
# value coming from \cite{Beck2017a}
e_l2 = rel_error_l2(u1, 0.30879)
@test e_l2 < 0.5 # this is quite high as a relative error.
@test e_l2 < 0.5 # this is quite high as a relative error.
println("d = $d, rel_error_l2 = $e_l2")
end
end
@@ -281,8 +281,8 @@ end
Dense(hls, hls, relu),
Dense(hls, 1)) # Neural network used by the scheme

opt = ADAM(1e-2) #optimiser
alg = DeepSplitting(nn, opt = opt)
opt = Flux.Optimise.Adam(1e-2) #optimiser
alg = DeepSplitting(nn, opt = opt )

X0 = fill(0.0f0, d) # initial point
g(X) = exp.(-0.25f0 * sum(X .^ 2, dims = 1)) # initial condition
@@ -329,8 +329,8 @@ if false
Dense(hls, hls, tanh),
Dense(hls, 1)) # Neural network used by the scheme

opt = ADAM(1e-3) #optimiser
alg = DeepSplitting(nn, opt = opt)
opt = Flux.Optimise.Adam(1e-3) #optimiser
alg = DeepSplitting(nn, opt = opt )

X0 = repeat([1.0f0, 0.5f0], div(d, 2)) # initial point
g(X) = sum(X .^ 2, dims = 1) # initial condition
@@ -381,8 +381,8 @@ if false
Dense(hls, hls, tanh),
Dense(hls, 1)) # Neural network used by the scheme

opt = ADAM(1e-3) #optimiser
alg = DeepSplitting(nn, opt = opt)
opt = Flux.Optimise.Adam(1e-3) #optimiser
alg = DeepSplitting(nn, opt = opt )

X0 = fill(0.0f0, d) # initial point
g(X) = log.(5.0f-1 .+ 5.0f-1 * sum(X .^ 2, dims = 1)) # initial condition
@@ -430,8 +430,8 @@ end
Dense(hls, hls, tanh),
Dense(hls, 1)) # Neural network used by the scheme

opt = ADAM()
alg = DeepSplitting(nn, opt = opt, λs = [1e-2, 1e-3])
opt = Flux.Optimise.Adam()
alg = DeepSplitting(nn, opt = opt, λs = [1e-2,1e-3] )

X0 = fill(100.0f0, d) # initial point
g(X) = minimum(X, dims = 1) # initial condition
@@ -519,8 +519,8 @@ end
# BatchNorm(hls, affine = true, dim = 1),
Dense(hls, 1, x -> x^2)) # positive function

opt = ADAM(1e-2)#optimiser
alg = DeepSplitting(nn_batch, K = K, opt = opt, mc_sample = x0_sample)
opt = Flux.Optimise.Adam(1e-2)#optimiser
alg = DeepSplitting(nn_batch, K=K, opt = opt, mc_sample = x0_sample)

function g(x)
Float32((2 * π)^(-d / 2)) * ss0^(-Float32(d) * 5.0f-1) *
@@ -575,8 +575,8 @@ end
Dense(hls, hls, tanh),
Dense(hls, 1)) # Neural network used by the scheme

opt = ADAM(1e-2) #optimiser
alg = DeepSplitting(nn, K = K, opt = opt, mc_sample = UniformSampling(-∂, ∂))
opt = Flux.Optimise.Adam(1e-2) #optimiser
alg = DeepSplitting(nn, K=K, opt = opt, mc_sample = UniformSampling(-∂, ∂) )

x0 = fill(0.0f0, d) # initial point
g(X) = exp.(-0.25f0 * sum(X .^ 2, dims = 1)) # initial condition
11 changes: 11 additions & 0 deletions test/qa.jl
@@ -0,0 +1,11 @@
using HighDimPDE, Aqua
@testset "Aqua" begin
Aqua.find_persistent_tasks_deps(HighDimPDE)
Aqua.test_ambiguities(HighDimPDE, recursive = false)
Aqua.test_deps_compat(HighDimPDE)
Aqua.test_piracies(HighDimPDE)
Aqua.test_project_extras(HighDimPDE)
Aqua.test_stale_deps(HighDimPDE)
Aqua.test_unbound_args(HighDimPDE)
Aqua.test_undefined_exports(HighDimPDE)
end
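The explicit per-check form above makes it easy to toggle individual checks; Aqua 0.8 also provides a bundled entry point that is roughly equivalent in spirit (a sketch; the exact keyword options shown are assumptions):

```julia
using HighDimPDE, Aqua
# Run the full Aqua suite, mirroring the non-recursive ambiguity check above.
Aqua.test_all(HighDimPDE; ambiguities = (recursive = false,))
```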