Add GH action to format files with JuliaFormatter
Saransh-cpp committed Oct 5, 2022
1 parent 66dddb9 commit e01eb33
Showing 48 changed files with 4,941 additions and 4,638 deletions.
27 changes: 27 additions & 0 deletions .github/workflows/format.yml
@@ -0,0 +1,27 @@
name: Format suggestions

on:
  pull_request:

concurrency:
  # Skip intermediate builds: always.
  # Cancel intermediate builds: only if it is a pull request build.
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: ${{ startsWith(github.ref, 'refs/pull/') }}

jobs:
  format:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - uses: julia-actions/setup-julia@latest
        with:
          version: 1
      - run: |
          julia -e 'using Pkg; Pkg.add("JuliaFormatter")'
          julia -e 'using JuliaFormatter; format("."; verbose=true)'
      - uses: reviewdog/action-suggester@v1
        with:
          tool_name: JuliaFormatter
          fail_on_error: true
          filter_mode: added
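
Note on the workflow above: the run step installs JuliaFormatter and formats the whole repository, and reviewdog/action-suggester then posts the resulting diff as inline suggestions on the pull request (restricted to lines the PR itself touched, per filter_mode: added). Contributors can reproduce the same formatting pass locally before pushing; a minimal sketch, assuming a throwaway environment is acceptable rather than installing into the default one:

    # format_repo.jl -- hypothetical helper, mirrors the workflow's run step
    using Pkg
    Pkg.activate(; temp = true)   # temporary environment so the global one stays clean
    Pkg.add("JuliaFormatter")
    using JuliaFormatter
    format("."; verbose = true)   # rewrites files in place, printing each file it touches
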
106 changes: 56 additions & 50 deletions docs/make.jl
@@ -1,56 +1,62 @@
using Documenter, Flux, NNlib, Functors, MLUtils, BSON, Optimisers, OneHotArrays, Zygote, ChainRulesCore

using Documenter, Flux, NNlib, Functors, MLUtils, BSON, Optimisers, OneHotArrays, Zygote,
ChainRulesCore

DocMeta.setdocmeta!(Flux, :DocTestSetup, :(using Flux); recursive = true)

makedocs(
modules = [Flux, NNlib, Functors, MLUtils, BSON, Optimisers, OneHotArrays, Zygote, ChainRulesCore, Base],
doctest = false,
sitename = "Flux",
# strict = [:cross_references,],
pages = [
"Home" => "index.md",
"Building Models" => [
"Overview" => "models/overview.md",
"Basics" => "models/basics.md",
"Recurrence" => "models/recurrence.md",
"Layer Reference" => "models/layers.md",
"Loss Functions" => "models/losses.md",
"Regularisation" => "models/regularisation.md",
"Custom Layers" => "models/advanced.md",
"NNlib.jl" => "models/nnlib.md",
"Activation Functions" => "models/activation.md",
],
"Handling Data" => [
"MLUtils.jl" => "data/mlutils.md",
"OneHotArrays.jl" => "data/onehot.md",
],
"Training Models" => [
"Optimisers" => "training/optimisers.md",
"Training" => "training/training.md",
"Callback Helpers" => "training/callbacks.md",
"Zygote.jl" => "training/zygote.md",
makedocs(;
modules = [
Flux,
NNlib,
Functors,
MLUtils,
BSON,
Optimisers,
OneHotArrays,
Zygote,
ChainRulesCore,
Base,
],
"GPU Support" => "gpu.md",
"Model Tools" => [
"Saving & Loading" => "saving.md",
"Shape Inference" => "outputsize.md",
"Weight Initialisation" => "utilities.md",
"Functors.jl" => "models/functors.md",
doctest = false,
sitename = "Flux",
# strict = [:cross_references,],
pages = [
"Home" => "index.md",
"Building Models" => [
"Overview" => "models/overview.md",
"Basics" => "models/basics.md",
"Recurrence" => "models/recurrence.md",
"Layer Reference" => "models/layers.md",
"Loss Functions" => "models/losses.md",
"Regularisation" => "models/regularisation.md",
"Custom Layers" => "models/advanced.md",
"NNlib.jl" => "models/nnlib.md",
"Activation Functions" => "models/activation.md",
],
"Handling Data" => [
"MLUtils.jl" => "data/mlutils.md",
"OneHotArrays.jl" => "data/onehot.md",
],
"Training Models" => [
"Optimisers" => "training/optimisers.md",
"Training" => "training/training.md",
"Callback Helpers" => "training/callbacks.md",
"Zygote.jl" => "training/zygote.md",
],
"GPU Support" => "gpu.md",
"Model Tools" => [
"Saving & Loading" => "saving.md",
"Shape Inference" => "outputsize.md",
"Weight Initialisation" => "utilities.md",
"Functors.jl" => "models/functors.md",
],
"Performance Tips" => "performance.md",
"Flux's Ecosystem" => "ecosystem.md",
],
"Performance Tips" => "performance.md",
"Flux's Ecosystem" => "ecosystem.md",
],
format = Documenter.HTML(
sidebar_sitename = false,
analytics = "UA-36890222-9",
assets = ["assets/flux.css"],
prettyurls = get(ENV, "CI", nothing) == "true"
),
)
format = Documenter.HTML(; sidebar_sitename = false,
analytics = "UA-36890222-9",
assets = ["assets/flux.css"],
prettyurls = get(ENV, "CI", nothing) == "true"))

deploydocs(
repo = "github.com/FluxML/Flux.jl.git",
target = "build",
push_preview = true
)
deploydocs(; repo = "github.com/FluxML/Flux.jl.git",
target = "build",
push_preview = true)
25 changes: 14 additions & 11 deletions perf/bench_utils.jl
@@ -3,36 +3,39 @@ using Flux
using CUDA
using Zygote: pullback, ignore


fw(m, x) = m(x)
bw(back) = back(1f0)
bw(back) = back(1.0f0)
fwbw(m, ps, x) = gradient(() -> sum(fw(m, x)), ps)
pb(m, ps, x) = pullback(() -> sum(fw(m, x)), ps)

function run_benchmark(model, x; cuda=true)

if cuda
function run_benchmark(model, x; cuda = true)
if cuda
model = model |> gpu
x = x |> gpu
end

ps = Flux.params(model)
y, back = pb(model, ps, x)

y, back = pb(model, ps, x)

if cuda
CUDA.allowscalar(false)
# CUDA.device!(3)
println(" forward")
fw(model, x); GC.gc(); CUDA.reclaim(); #warmup
fw(model, x)
GC.gc()
CUDA.reclaim() #warmup
@btime CUDA.@sync(fw($model, $x)) teardown=(GC.gc(); CUDA.reclaim())

println(" backward")
bw(back); GC.gc(); CUDA.reclaim(); #warmup
bw(back)
GC.gc()
CUDA.reclaim() #warmup
@btime CUDA.@sync(bw($back)) teardown=(GC.gc(); CUDA.reclaim())

println(" forw and back")
fwbw(model, ps, x); GC.gc(); CUDA.reclaim(); #warmup
fwbw(model, ps, x)
GC.gc()
CUDA.reclaim() #warmup
@btime CUDA.@sync(fwbw($model, $ps, $x)) teardown=(GC.gc(); CUDA.reclaim())
else
println(" forward")
6 changes: 3 additions & 3 deletions perf/conv.jl
@@ -1,8 +1,8 @@
for ch in [1, 3, 16, 64]
x = rand(Float32, 64, 64, ch, 64)
model = Conv((3,3), ch=>ch)
model = Conv((3, 3), ch => ch)
println("CPU ch=$ch")
run_benchmark(model, x, cuda=false)
run_benchmark(model, x; cuda = false)
println("CUDA ch=$ch")
run_benchmark(model, x, cuda=true)
run_benchmark(model, x; cuda = true)
end
4 changes: 2 additions & 2 deletions perf/dense.jl
@@ -2,7 +2,7 @@ for n in [2, 20, 200, 2000]
x = randn(Float32, n, n)
model = Dense(n, n)
println("CPU n=$n")
run_benchmark(model, x, cuda=false)
run_benchmark(model, x; cuda = false)
println("CUDA n=$n")
run_benchmark(model, x, cuda=true)
run_benchmark(model, x; cuda = true)
end
78 changes: 39 additions & 39 deletions perf/recurrent.jl
@@ -1,62 +1,62 @@


struct RNNWrapper{T}
rnn::T
rnn::T
end
Flux.@functor RNNWrapper

# Need to specialize for RNNWrapper.
fw(r::RNNWrapper, X::Vector{<:AbstractArray}) = begin
Flux.reset!(r.rnn)
[r.rnn(x) for x in X]
Flux.reset!(r.rnn)
[r.rnn(x) for x in X]
end

fw(r::RNNWrapper, X) = begin
Flux.reset!(r.rnn)
r.rnn(X)
Flux.reset!(r.rnn)
r.rnn(X)
end

fwbw(r::RNNWrapper, ps, X::Vector{<:AbstractArray}) = gradient(ps) do
y = fw(r, X)
sum(sum(y))
end
fwbw(r::RNNWrapper, ps, X::Vector{<:AbstractArray}) =
gradient(ps) do
y = fw(r, X)
return sum(sum(y))
end

pb(r::RNNWrapper, ps, X::Vector{<:AbstractArray}) = pullback(ps) do
y = fw(r, X)
sum(sum(y))
end
pb(r::RNNWrapper, ps, X::Vector{<:AbstractArray}) =
pullback(ps) do
y = fw(r, X)
return sum(sum(y))
end

function rnn_benchmark_sweep(data_creator::Function, rnn_type)
for n in [2, 20, 200, 1000], ts in [1, 4, 16, 64]
x, x_n = data_creator(n, ts)
model = RNNWrapper(rnn_type(n, n))

println("$rnn_type $x_n CPU n=$n, ts=$ts")
run_benchmark(model, x, cuda=false)

println("$rnn_type $x_n CUDA n=$n, ts=$ts")
try
run_benchmark(model, x, cuda=true)
catch ex
@show typeof(ex)
if ex isa OutOfGPUMemoryError
@warn "Not enough GPU memory to run test"
else
rethrow(ex)
end
for n in [2, 20, 200, 1000], ts in [1, 4, 16, 64]
x, x_n = data_creator(n, ts)
model = RNNWrapper(rnn_type(n, n))

println("$rnn_type $x_n CPU n=$n, ts=$ts")
run_benchmark(model, x; cuda = false)

println("$rnn_type $x_n CUDA n=$n, ts=$ts")
try
run_benchmark(model, x; cuda = true)
catch ex
@show typeof(ex)
if ex isa OutOfGPUMemoryError
@warn "Not enough GPU memory to run test"
else
rethrow(ex)
end
end
end
end
end

for rnn_type in [Flux.RNN, Flux.GRU, Flux.LSTM]
rnn_benchmark_sweep(rnn_type) do n, ts
[randn(Float32, n, n) for _ in 1:ts], "Vec"
end
rnn_benchmark_sweep(rnn_type) do n, ts
return [randn(Float32, n, n) for _ in 1:ts], "Vec"
end
end

for rnn_type in [Flux.RNN, Flux.GRU, Flux.LSTM]
rnn_benchmark_sweep(rnn_type) do n, ts
randn(Float32, n, n, ts), "Block"
end
rnn_benchmark_sweep(rnn_type) do n, ts
return randn(Float32, n, n, ts), "Block"
end
end
