Skip to content

Commit

Permalink
Try marking tests as not broken
Browse files Browse the repository at this point in the history
  • Loading branch information
avik-pal committed Jun 30, 2024
1 parent 1a61165 commit 465b40d
Show file tree
Hide file tree
Showing 10 changed files with 100 additions and 206 deletions.
16 changes: 0 additions & 16 deletions test/core_tests.jl

This file was deleted.

6 changes: 0 additions & 6 deletions test/helpers/batched_ad_tests.jl
Original file line number Diff line number Diff line change
Expand Up @@ -4,9 +4,6 @@
rng = StableRNG(12345)

@testset "$mode" for (mode, aType, dev, ongpu) in MODES
# FIXME: AMDGPU takes too long right now
mode === "amdgpu" && continue

models = (
Chain(Conv((3, 3), 2 => 4, gelu; pad=SamePad()),
Conv((3, 3), 4 => 2, gelu; pad=SamePad()), FlattenLayer(), Dense(18 => 2)),
Expand Down Expand Up @@ -84,9 +81,6 @@ end
rng = StableRNG(12345)

@testset "$mode" for (mode, aType, dev, ongpu) in MODES
# FIXME: AMDGPU takes too long right now
mode === "amdgpu" && continue

models = (
Chain(Conv((3, 3), 2 => 4, gelu; pad=SamePad()),
Conv((3, 3), 4 => 2, gelu; pad=SamePad()), FlattenLayer(), Dense(18 => 2)),
Expand Down
15 changes: 0 additions & 15 deletions test/helpers/nestedad_tests.jl
Original file line number Diff line number Diff line change
Expand Up @@ -6,9 +6,6 @@
rng = StableRNG(1234)

@testset "$mode" for (mode, aType, dev, ongpu) in MODES
# FIXME: AMDGPU takes too long right now
mode === "amdgpu" && continue

Xs = (aType(randn(rng, Float32, 3, 3, 2, 4)), aType(randn(rng, Float32, 2, 4)),
aType(randn(rng, Float32, 2, 4)), aType(randn(rng, Float32, 3, 3, 2, 4)))
models = (
Expand Down Expand Up @@ -84,9 +81,6 @@ end
rng = StableRNG(1234)

@testset "$mode" for (mode, aType, dev, ongpu) in MODES
# FIXME: AMDGPU takes too long right now
mode === "amdgpu" && continue

Xs = (aType(randn(rng, Float32, 3, 3, 2, 4)), aType(randn(rng, Float32, 2, 4)),
aType(randn(rng, Float32, 2, 4)), aType(randn(rng, Float32, 3, 3, 2, 4)))
models = (
Expand Down Expand Up @@ -166,9 +160,6 @@ end
rng = StableRNG(1234)

@testset "$mode" for (mode, aType, dev, ongpu) in MODES
# FIXME: AMDGPU takes too long right now
mode === "amdgpu" && continue

@testset "Structured Matrix: Issue LuxDL/Lux.jl#602" begin
model = @compact(; potential=Dense(5 => 5, gelu)) do x
@return reshape(diag(only(Zygote.jacobian(potential, x))), size(x))
Expand Down Expand Up @@ -206,9 +197,6 @@ end
rng = StableRNG(1234)

@testset "$mode" for (mode, aType, dev, ongpu) in MODES
# FIXME: AMDGPU takes too long right now
mode === "amdgpu" && continue

models = (
Chain(Conv((3, 3), 2 => 4, gelu; pad=SamePad()), BatchNorm(4),
Conv((3, 3), 4 => 1, gelu; pad=SamePad())),
Expand Down Expand Up @@ -278,9 +266,6 @@ end
rng = StableRNG(1234)

@testset "$mode" for (mode, aType, dev, ongpu) in MODES
# FIXME: AMDGPU takes too long right now
mode === "amdgpu" && continue

x = rand(rng, 3, 3) |> aType
v = vec(rand(rng, 3, 3)) |> aType

Expand Down
4 changes: 2 additions & 2 deletions test/layers/containers_tests.jl
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@

@jet layer(x, ps, st)
__f = x -> sum(first(layer(x, ps, st)))
@eval @test_gradients $__f $x atol=1.0f-3 rtol=1.0f-3 reverse_diff_broken=true gpu_testing=$ongpu
@eval @test_gradients $__f $x atol=1.0f-3 rtol=1.0f-3 gpu_testing=$ongpu
end

@testset "concat size" begin
Expand Down Expand Up @@ -46,7 +46,7 @@ end

@jet layer(x, ps, st)
__f = x -> sum(first(layer(x, ps, st)))
@eval @test_gradients $__f $x atol=1.0f-3 rtol=1.0f-3 reverse_diff_broken=true gpu_testing=$ongpu
@eval @test_gradients $__f $x atol=1.0f-3 rtol=1.0f-3 gpu_testing=$ongpu
end

@testset "concat size" begin
Expand Down
160 changes: 44 additions & 116 deletions test/layers/conv_tests.jl
Original file line number Diff line number Diff line change
Expand Up @@ -104,75 +104,39 @@ end
display(layer)
ps, st = Lux.setup(rng, layer) |> dev

broken = false
try
layer(x, ps, st)
broken = false
catch
@warn "$mode Test broken for $layer"
broken = true
end
layer(x, ps, st)
@test size(ps.weight) == (3, 3, 2)
@test size(layer(x, ps, st)[1]) == (2, 2, 1)

if !broken
@test size(ps.weight) == (3, 3, 2)
@test size(layer(x, ps, st)[1]) == (2, 2, 1)

@jet layer(x, ps, st)
__f = (x, ps) -> sum(first(layer(x, ps, st)))
@eval @test_gradients $__f $x $ps atol=1.0f-3 rtol=1.0f-3 gpu_testing=$ongpu
else
@test_broken !broken
end
@jet layer(x, ps, st)
__f = (x, ps) -> sum(first(layer(x, ps, st)))
@eval @test_gradients $__f $x $ps atol=1.0f-3 rtol=1.0f-3 gpu_testing=$ongpu

x = rand(rng, Float32, 4, 4, 6, 1) |> aType
layer = Conv((3, 3), 6 => 2; groups=2)
display(layer)
ps, st = Lux.setup(rng, layer) |> dev

broken = false
try
layer(x, ps, st)
broken = false
catch
@warn "$mode Test broken for $layer"
broken = true
end

if !broken
@test size(ps.weight) == (3, 3, 3, 2)
@test size(layer(x, ps, st)[1]) == (2, 2, 2, 1)
layer(x, ps, st)
@test size(ps.weight) == (3, 3, 3, 2)
@test size(layer(x, ps, st)[1]) == (2, 2, 2, 1)

@jet layer(x, ps, st)
__f = (x, ps) -> sum(first(layer(x, ps, st)))
@eval @test_gradients $__f $x $ps atol=1.0f-3 rtol=1.0f-3 gpu_testing=$ongpu
else
@test_broken !broken
end
@jet layer(x, ps, st)
__f = (x, ps) -> sum(first(layer(x, ps, st)))
@eval @test_gradients $__f $x $ps atol=1.0f-3 rtol=1.0f-3 gpu_testing=$ongpu

x = rand(rng, Float32, 4, 4, 4, 6, 1) |> aType
layer = Conv((3, 3, 3), 6 => 2; groups=2)
display(layer)
ps, st = Lux.setup(rng, layer) |> dev

broken = false
try
layer(x, ps, st)
broken = false
catch
@warn "$mode Test broken for $layer"
broken = true
end

if !broken
@test size(ps.weight) == (3, 3, 3, 3, 2)
@test size(layer(x, ps, st)[1]) == (2, 2, 2, 2, 1)
layer(x, ps, st)
@test size(ps.weight) == (3, 3, 3, 3, 2)
@test size(layer(x, ps, st)[1]) == (2, 2, 2, 2, 1)

@jet layer(x, ps, st)
__f = (x, ps) -> sum(first(layer(x, ps, st)))
@eval @test_gradients $__f $x $ps atol=1.0f-3 rtol=1.0f-3 gpu_testing=$ongpu
else
@test_broken !broken
end
@jet layer(x, ps, st)
__f = (x, ps) -> sum(first(layer(x, ps, st)))
@eval @test_gradients $__f $x $ps atol=1.0f-3 rtol=1.0f-3 gpu_testing=$ongpu

# Test that we cannot ask for non-integer multiplication factors
layer = Conv((2, 2), 3 => 10; groups=2)
Expand All @@ -188,22 +152,10 @@ end
x = rand(rng, Float32, 16, 32, 1) |> aType
ps, st = Lux.setup(rng, layer) |> dev

broken = false
try
layer(x, ps, st)
broken = false
catch
@warn "$mode Test broken for $layer"
broken = true
end

if !broken
@jet layer(x, ps, st)
__f = (x, ps) -> sum(first(layer(x, ps, st)))
@eval @test_gradients $__f $x $ps atol=1.0f-3 rtol=1.0f-3 gpu_testing=$ongpu
else
@test_broken !broken
end
layer(x, ps, st)
@jet layer(x, ps, st)
__f = (x, ps) -> sum(first(layer(x, ps, st)))
@eval @test_gradients $__f $x $ps atol=1.0f-3 rtol=1.0f-3 gpu_testing=$ongpu
end
end

Expand Down Expand Up @@ -288,29 +240,17 @@ end
display(layer)
ps, st = Lux.setup(rng, layer) |> dev

broken = false
try
layer(x, ps, st)
broken = false
catch
@warn "$mode Test broken for $layer"
broken = true
end

if !broken
if kwarg.stride == 1
@test size(layer(x, ps, st)[1]) == size(x)
else
@test size(layer(x, ps, st)[1])[1:(end - 2)] ==
cld.(size(x)[1:(end - 2)], kwarg.stride)
end

@jet layer(x, ps, st)
__f = (x, ps) -> sum(first(layer(x, ps, st)))
@eval @test_gradients $__f $x $ps gpu_testing=$ongpu atol=1e-3 rtol=1e-3
layer(x, ps, st)
if kwarg.stride == 1
@test size(layer(x, ps, st)[1]) == size(x)
else
@test_broken !broken
@test size(layer(x, ps, st)[1])[1:(end - 2)] ==
cld.(size(x)[1:(end - 2)], kwarg.stride)
end

@jet layer(x, ps, st)
__f = (x, ps) -> sum(first(layer(x, ps, st)))
@eval @test_gradients $__f $x $ps gpu_testing=$ongpu atol=1e-3 rtol=1e-3
end
end

Expand All @@ -325,7 +265,7 @@ end

y = zeros(eltype(ps.weight), 5, 5, 1, 1) |> aType
y[2:(end - 1), 2:(end - 1), 1, 1] = ps.weight
@test check_approx(y, layer(x, ps, st)[1])
@test y ≈ layer(x, ps, st)[1] rtol=1e-3 atol=1e-3

@jet layer(x, ps, st)

Expand All @@ -335,7 +275,7 @@ end

y = zeros(eltype(ps.weight), 5, 7, 1, 1) |> aType
y[2:(end - 1), 4, 1, 1] = ps.weight
@test check_approx(y, layer(x, ps, st)[1])
@test y ≈ layer(x, ps, st)[1] rtol=1e-3 atol=1e-3

@jet layer(x, ps, st)

Expand All @@ -345,7 +285,7 @@ end

y = zeros(eltype(ps.weight), 7, 5, 1, 1) |> aType
y[4, 2:(end - 1), 1, 1] = ps.weight
@test check_approx(y, layer(x, ps, st)[1])
@test y ≈ layer(x, ps, st)[1] rtol=1e-3 atol=1e-3

@jet layer(x, ps, st)

Expand All @@ -355,7 +295,7 @@ end

y = zeros(eltype(ps.weight), 7, 5, 1, 1) |> aType
y[4, 2:(end - 1), 1, 1] = ps.weight
@test check_approx(y, layer(x, ps, st)[1])
@test y ≈ layer(x, ps, st)[1] rtol=1e-3 atol=1e-3

@jet layer(x, ps, st)
end
Expand Down Expand Up @@ -521,29 +461,17 @@ end
display(layer)
ps, st = Lux.setup(rng, layer) |> dev

broken = false
try
layer(x, ps, st)
broken = false
catch
@warn "$mode Test broken for $layer"
broken = true
end

if !broken
if kwarg.stride == 1
@test size(layer(x, ps, st)[1]) == size(x)
else
@test size(layer(x, ps, st)[1])[1:(end - 2)] ==
cld.(size(x)[1:(end - 2)], kwarg.stride)
end

@jet layer(x, ps, st)
__f = (x, ps) -> sum(first(layer(x, ps, st)))
@eval @test_gradients $__f $x $ps gpu_testing=$ongpu atol=1e-3 rtol=1e-3
layer(x, ps, st)
if kwarg.stride == 1
@test size(layer(x, ps, st)[1]) == size(x)
else
@test_broken !broken
@test size(layer(x, ps, st)[1])[1:(end - 2)] ==
cld.(size(x)[1:(end - 2)], kwarg.stride)
end

@jet layer(x, ps, st)
__f = (x, ps) -> sum(first(layer(x, ps, st)))
@eval @test_gradients $__f $x $ps gpu_testing=$ongpu atol=1e-3 rtol=1e-3
end
end

Expand Down
28 changes: 9 additions & 19 deletions test/layers/recurrent_tests.jl
Original file line number Diff line number Diff line change
Expand Up @@ -316,16 +316,11 @@ end
@test length(y_) == 4
@test all(x -> size(x) == (5, 2), y_)

if mode != "amdgpu"
__f = p -> sum(first(rnn(x, p, st)))
@eval @test_gradients $__f $ps atol=1e-2 rtol=1e-2 gpu_testing=$ongpu

__f = p -> sum(Base.Fix1(sum, abs2), first(rnn_seq(x, p, st)))
@eval @test_gradients $__f $ps atol=1e-2 rtol=1e-2 gpu_testing=$ongpu
else
# This is just added as a stub to remember about this broken test
@test_broken 1 + 1 == 1
end
__f = p -> sum(first(rnn(x, p, st)))
@eval @test_gradients $__f $ps atol=1e-2 rtol=1e-2 gpu_testing=$ongpu

__f = p -> sum(Base.Fix1(sum, abs2), first(rnn_seq(x, p, st)))
@eval @test_gradients $__f $ps atol=1e-2 rtol=1e-2 gpu_testing=$ongpu
end

ordering isa BatchLastIndex && continue
Expand All @@ -346,16 +341,11 @@ end
@test length(y_) == 4
@test all(x -> size(x) == (5,), y_)

if mode != "amdgpu"
__f = p -> sum(first(rnn(x, p, st)))
@eval @test_gradients $__f $ps atol=1e-2 rtol=1e-2 gpu_testing=$ongpu
__f = p -> sum(first(rnn(x, p, st)))
@eval @test_gradients $__f $ps atol=1e-2 rtol=1e-2 gpu_testing=$ongpu

__f = p -> sum(Base.Fix1(sum, abs2), first(rnn_seq(x, p, st)))
@eval @test_gradients $__f $ps atol=1e-2 rtol=1e-2 gpu_testing=$ongpu
else
# This is just added as a stub to remember about this broken test
@test_broken 1 + 1 == 1
end
__f = p -> sum(Base.Fix1(sum, abs2), first(rnn_seq(x, p, st)))
@eval @test_gradients $__f $ps atol=1e-2 rtol=1e-2 gpu_testing=$ongpu
end
end
end
Expand Down
6 changes: 3 additions & 3 deletions test/layers/type_stability_tests.jl
Original file line number Diff line number Diff line change
Expand Up @@ -73,11 +73,11 @@
ps, st = Lux.setup(rng, model) |> dev
x = input |> dev

@inferred model(x, ps, st)
@inferred loss_function(model, x, ps, st)
if mode == "amdgpu" && (model isa Conv || model isa LayerNorm)
@test_broken false
@test_broken @inferred Zygote.gradient(loss_function, model, x, ps, st)
else
@inferred model(x, ps, st)
@inferred loss_function(model, x, ps, st)
@inferred Zygote.gradient(loss_function, model, x, ps, st)
end
end
Expand Down
Loading

0 comments on commit 465b40d

Please sign in to comment.