From f0ab4cb517a6687b5aa0ae78105ba5016ba1675e Mon Sep 17 00:00:00 2001 From: Samuele Marro Date: Mon, 4 May 2020 21:19:51 +0200 Subject: [PATCH] Added valid and fixed padding to conv2d layer (#43) Added valid and fixed padding to conv2d layer. Contributors: Adam Hillier Co-authored-by: Vincent Tjeng --- src/net_components/layers/conv2d.jl | 115 +++++- src/utils/import_weights.jl | 8 +- test/net_components/layers/conv2d.jl | 574 +++++++++++++++++++++++---- 3 files changed, 584 insertions(+), 113 deletions(-) diff --git a/src/net_components/layers/conv2d.jl b/src/net_components/layers/conv2d.jl index 0d352a79..6cfe1977 100644 --- a/src/net_components/layers/conv2d.jl +++ b/src/net_components/layers/conv2d.jl @@ -2,6 +2,15 @@ using JuMP using ConditionalJuMP export Conv2d +export Padding, SamePadding, ValidPadding + +struct SamePadding end +Base.show(io::IO, p::SamePadding) = print(io, "same") +struct ValidPadding end +Base.show(io::IO, p::ValidPadding) = print(io, "valid") + +FixedPadding = Union{Int, Tuple{Int, Int}, Tuple{Int, Int, Int, Int}} +Padding = Union{SamePadding, ValidPadding, FixedPadding} """ $(TYPEDEF) @@ -18,25 +27,30 @@ $(FIELDS) filter::Array{T, 4} bias::Array{U, 1} stride::V + padding::Padding - function Conv2d{T, U, V}(filter::Array{T, 4}, bias::Array{U, 1}, stride::V) where {T<:JuMPReal, U<:JuMPReal, V<:Int64} + function Conv2d{T, U, V}(filter::Array{T, 4}, bias::Array{U, 1}, stride::V, padding::Padding) where {T<:JuMPReal, U<:JuMPReal, V<:Int64} (filter_height, filter_width, filter_in_channels, filter_out_channels) = size(filter) bias_out_channels = length(bias) @assert( filter_out_channels == bias_out_channels, "For this convolution layer, number of output channels in filter, $filter_out_channels, does not match number of output channels in bias, $bias_out_channels." 
) - return new(filter, bias, stride) + return new(filter, bias, stride, padding) end end +function Conv2d(filter::Array{T, 4}, bias::Array{U, 1}, stride::V, padding::Padding) where {T<:JuMPReal, U<:JuMPReal, V<:Int64} + Conv2d{T, U, V}(filter, bias, stride, padding) +end + function Conv2d(filter::Array{T, 4}, bias::Array{U, 1}, stride::V) where {T<:JuMPReal, U<:JuMPReal, V<:Int64} - Conv2d{T, U, V}(filter, bias, stride) + Conv2d{T, U, V}(filter, bias, stride, SamePadding()) end function Conv2d(filter::Array{T, 4}, bias::Array{U, 1}) where {T<:JuMPReal, U<:JuMPReal} - Conv2d(filter, bias, 1) + Conv2d(filter, bias, 1, SamePadding()) end """ @@ -59,8 +73,9 @@ end function Base.show(io::IO, p::Conv2d) (filter_height, filter_width, filter_in_channels, filter_out_channels) = size(p.filter) stride = p.stride + padding = p.padding print(io, - "Conv2d($filter_in_channels, $filter_out_channels, kernel_size=($(filter_height), $(filter_width)), stride=($(stride), $(stride)), padding=same)" + "Conv2d($filter_in_channels, $filter_out_channels, kernel_size=($(filter_height), $(filter_width)), stride=($(stride), $(stride)), padding=$(padding))" ) end @@ -86,15 +101,81 @@ function increment!(s::JuMP.AffExpr, input_val::Real, filter_val::JuMP.Variable) push!(s, Float64(input_val), filter_val) end +function compute_output_parameters( + in_height::Int, in_width::Int, + filter_height::Int, filter_width::Int, stride::Int, + padding::FixedPadding +)::Tuple{NTuple{2, Int}, NTuple{2, Int}} + (top_padding, bottom_padding, left_padding, right_padding) = compute_padding_values(padding) + out_height = round(Int, (in_height + top_padding + bottom_padding - filter_height) / stride, RoundDown) + 1 + out_width = round(Int, (in_width + left_padding + right_padding - filter_width) / stride, RoundDown) + 1 + + output_size = (out_height, out_width) + filter_offset = (top_padding, left_padding) + return (output_size, filter_offset) +end +function compute_output_parameters( + in_height::Int, 
in_width::Int, + filter_height::Int, filter_width::Int, stride::Int, + padding::SamePadding +)::Tuple{NTuple{2, Int}, NTuple{2, Int}} + out_height = round(Int, in_height/stride, RoundUp) + out_width = round(Int, in_width/stride, RoundUp) + pad_along_height = max((out_height - 1)*stride + filter_height - in_height, 0) + pad_along_width = max((out_width - 1)*stride + filter_width - in_width, 0) + filter_height_offset = round(Int, pad_along_height/2, RoundDown) + filter_width_offset = round(Int, pad_along_width/2, RoundDown) + + output_size = (out_height, out_width) + filter_offset = (filter_height_offset, filter_width_offset) + return (output_size, filter_offset) +end + +function compute_output_parameters( + in_height::Int, in_width::Int, + filter_height::Int, filter_width::Int, stride::Int, + padding::ValidPadding +)::Tuple{NTuple{2, Int}, NTuple{2, Int}} + out_height = round(Int, (in_height + 1 - filter_height) / stride, RoundUp) + out_width = round(Int, (in_width + 1 - filter_width) / stride, RoundUp) + return((out_height, out_width), (0, 0)) +end + +function compute_padding_values( + padding::Int +)::NTuple{4, Int} + return (padding, padding, padding, padding) +end + +function compute_padding_values( + padding::NTuple{2, Int} +)::NTuple{4, Int} + (y_padding, x_padding) = padding + return (y_padding, y_padding, x_padding, x_padding) +end + +function compute_padding_values( + padding::NTuple{4, Int} +)::NTuple{4, Int} + return padding +end """ $(SIGNATURES) Computes the result of convolving `input` with the `filter` and `bias` stored in `params`. -Mirrors `tf.nn.conv2d` from the `tensorflow` package, with `strides = [1, 1, 1, 1], -padding = 'SAME'`. +Mirrors `tf.nn.conv2d` from the `tensorflow` package, with +`strides = [1, params.stride, params.stride, 1]`. + +Supports three types of padding: +- 'same': Specify via `SamePadding()`. Padding is added so that the output has the same size as the input. +- 'valid': Specify via `ValidPadding()`. No padding is added. 
+- 'fixed': Specify via: + - A single integer, interpreted as padding for both axes + - A tuple of two integers, interpreted as (y_padding, x_padding) + - A tuple of four integers, interpreted as (top, bottom, left, right) # Throws * AssertionError if `input` and `filter` are not compatible. @@ -104,34 +185,24 @@ function conv2d( params::Conv2d{U, V}) where {T<:JuMPReal, U<:JuMPReal, V<:JuMPReal} if T<:JuMPLinearType || U<:JuMPLinearType || V<:JuMPLinearType - Memento.info(MIPVerify.LOGGER, "Applying $(params) ... ") + info(MIPVerify.LOGGER, "Applying $(params) ... ") end filter = params.filter stride = params.stride + padding = params.padding (batch, in_height, in_width, input_in_channels) = size(input) (filter_height, filter_width, filter_in_channels, filter_out_channels) = size(filter) @assert( - input_in_channels == filter_in_channels, + input_in_channels == filter_in_channels, "Number of channels in input, $input_in_channels, does not match number of channels, $filter_in_channels, that filters operate on." ) - - out_height = round(Int, in_height/stride, RoundUp) - out_width = round(Int, in_width/stride, RoundUp) - output_size = (batch, out_height, out_width, filter_out_channels) # Considered using offset arrays here, but could not get it working. + ((out_height, out_width), (filter_height_offset, filter_width_offset)) = compute_output_parameters(in_height, in_width, filter_height, filter_width, stride, padding) + output_size = (batch, out_height, out_width, filter_out_channels) - # Calculating appropriate offsets so that center of kernel is matched with - # cell at which correlation is being calculated. Note that tensorflow - # chooses a specific convention for a dimension with even size which we - # replicate here. 
- pad_along_height = max((out_height - 1)*stride + filter_height - in_height, 0) - pad_along_width = max((out_width - 1)*stride + filter_width - in_width, 0) - filter_height_offset = round(Int, pad_along_height/2, RoundDown) - filter_width_offset = round(Int, pad_along_width/2, RoundDown) - W = Base.promote_op(+, V, Base.promote_op(*, T, U)) output = Array{W}(undef, output_size) diff --git a/src/utils/import_weights.jl b/src/utils/import_weights.jl index a8a09df1..9e0a3585 100644 --- a/src/utils/import_weights.jl +++ b/src/utils/import_weights.jl @@ -3,7 +3,7 @@ export get_matrix_params, get_conv_params, get_example_network_params """ $(SIGNATURES) -Helper function to import the parameters for a layer carrying out matrix multiplication +Helper function to import the parameters for a layer carrying out matrix multiplication (e.g. fully connected layer / softmax layer) from `param_dict` as a [`Linear`](@ref) object. @@ -62,13 +62,15 @@ function get_conv_params( expected_size::NTuple{4, Int}; matrix_name::String = "weight", bias_name::String = "bias", - expected_stride::Integer = 1 + expected_stride::Integer = 1, + padding::Padding = SamePadding() )::Conv2d params = Conv2d( param_dict["$layer_name/$matrix_name"], dropdims(param_dict["$layer_name/$bias_name"], dims=1), - expected_stride + expected_stride, + padding ) check_size(params, expected_size) diff --git a/test/net_components/layers/conv2d.jl b/test/net_components/layers/conv2d.jl index da134dae..07488362 100644 --- a/test/net_components/layers/conv2d.jl +++ b/test/net_components/layers/conv2d.jl @@ -4,8 +4,101 @@ using MIPVerify using MIPVerify: check_size, increment! @isdefined(TestHelpers) || include("../../TestHelpers.jl") -@testset "conv2d.jl" begin +function test_convolution_layer( + p::MIPVerify.Conv2d, + input::AbstractArray{T, 4}, + expected_output::AbstractArray{T, 4} + ) where {T<:Real} + """ + Tests that passing `input` into the convolution layer `p` produces + `expected_output`. 
We test three combinations: + + 1) Passing numerical input into a numerical Conv2d layer, verifying + that we recover the value of `expected_output`. + + 2) Setting up an optimization problem with variables corresponding + to the convolution layer and the output (`p_v` and `output_v`). + + `output_v` is constrained to be the result of applying `p_v` to + `input`, and is also constrained to be equal to `expected_output`. + + We verify that, when the optimization problem is solved, applying + `p_v` to `input` recovers the value of `expected_output`. + + Note that since the optimization problem is under-determined, we + cannot assert that `p_v` is equal to `p`. + 3) Setting up an optimization problem with variables corresponding + to the input and output (`input_v` and `output_v`). + + `output_v` is constrained to be the result of applying `p` to + `input_v`, and is also constrained to be equal to `expected_output`. + + We verify that, when the optimization problem is solved, applying + `p` to `input_v` recovers the value of `expected_output`. + + As in case 2), we cannot assert that `input_v` is equal to input. 
+ """ + input_size = size(input) + filter_size = size(p.filter) + bias_size = size(p.bias) + @testset "Numerical Input, Numerical Layer Parameters" begin + evaluated_output = MIPVerify.conv2d(input, p) + @test evaluated_output == expected_output + end + @testset "Numerical Input, Variable Layer Parameters" begin + m = TestHelpers.get_new_model() + filter_v = map(_ -> @variable(m), CartesianIndices(filter_size)) + bias_v = map(_ -> @variable(m), CartesianIndices(bias_size)) + p_v = MIPVerify.Conv2d(filter_v, bias_v, p.stride, p.padding) + output_v = MIPVerify.conv2d(input, p_v) + @constraint(m, output_v .== expected_output) + solve(m) + + p_solve = MIPVerify.Conv2d(getvalue(filter_v), getvalue(bias_v), p.stride, p.padding) + solve_output = MIPVerify.conv2d(input, p_solve) + @test solve_output≈expected_output + end + @testset "Variable Input, Numerical Layer Parameters" begin + m = TestHelpers.get_new_model() + input_v = map(_ -> @variable(m), CartesianIndices(input_size)) + output_v = MIPVerify.conv2d(input_v, p) + @constraint(m, output_v .== expected_output) + solve(m) + + solve_output = MIPVerify.conv2d(getvalue(input_v), p) + @test solve_output≈expected_output + end +end + +function test_convolution_layer_with_default_values( + input_size::NTuple{4, Int}, + filter_size::NTuple{4, Int}, + expected_output_2d::AbstractArray{T, 2}, + stride::Int, + padding::Padding + ) where {T<:Real} + """ + Generates test input of dimension `input_size`, and a Conv2d layer with + a filter of dimension `filter_size` and specified `stride` and `padding`, + running `test_convolution_layer` to ensure that passing the generated + `input` into the generated Conv2d layer produces `expected_output_2d`. + + + The input generated consists of natural numbers in increasing order from + left to right and then top to bottom. + + The filter generated is all 1s, and the convolution layer has bias 0. 
+ + For convenience, the expected output only needs to be specified with the + non-singleton dimensions. + """ + input = reshape([1:prod(input_size);], input_size) + filter = ones(filter_size...) + bias = [0] + expected_output = reshape(expected_output_2d, (1, size(expected_output_2d)..., 1)) + p = Conv2d(filter, bias, stride, padding) + test_convolution_layer(p, input, expected_output) +end + +@testset "conv2d.jl" begin @testset "Conv2d" begin @testset "Base.show" begin filter = ones(3, 3, 2, 5) @@ -77,43 +170,16 @@ using MIPVerify: check_size, increment! input = reshape(collect(1:prod(input_size)), input_size) .- 16 filter_size = (3, 3, 2, 1) filter = reshape(collect(1:prod(filter_size)), filter_size) .- 9 - bias_size = (1, ) bias = [1] - true_output_raw = [ + expected_output_2d = [ 225 381 405 285; 502 787 796 532; 550 823 832 532; 301 429 417 249; ] - true_output = reshape(transpose(true_output_raw), (1, 4, 4, 1)) + expected_output = reshape(transpose(expected_output_2d), (1, 4, 4, 1)) p = Conv2d(filter, bias) - @testset "Numerical Input, Numerical Layer Parameters" begin - evaluated_output = MIPVerify.conv2d(input, p) - @test evaluated_output == true_output - end - @testset "Numerical Input, Variable Layer Parameters" begin - m = TestHelpers.get_new_model() - filter_v = map(_ -> @variable(m), CartesianIndices(filter_size)) - bias_v = map(_ -> @variable(m), CartesianIndices(bias_size)) - p_v = Conv2d(filter_v, bias_v) - output_v = MIPVerify.conv2d(input, p_v) - @constraint(m, output_v .== true_output) - solve(m) - - p_solve = MIPVerify.Conv2d(getvalue(filter_v), getvalue(bias_v)) - solve_output = MIPVerify.conv2d(input, p_solve) - @test solve_output≈true_output - end - @testset "Variable Input, Numerical Layer Parameters" begin - m = TestHelpers.get_new_model() - input_v = map(_ -> @variable(m), CartesianIndices(input_size)) - output_v = MIPVerify.conv2d(input_v, p) - @constraint(m, output_v .== true_output) - solve(m) - - solve_output = 
MIPVerify.conv2d(getvalue(input_v), p) - @test solve_output≈true_output - end + test_convolution_layer(p, input, expected_output) end @testset "conv2d with non-unit stride" begin @@ -121,43 +187,16 @@ using MIPVerify: check_size, increment! input = reshape(collect(1:prod(input_size)), input_size) .- 36 filter_size = (3, 3, 2, 1) filter = reshape(collect(1:prod(filter_size)), filter_size) .- 9 - bias_size = (1, ) bias = [1] stride = 2 - true_output_raw = [ + expected_output_2d = [ 1597 1615 1120; 1705 1723 1120; 903 879 513 ; ] - true_output = reshape(transpose(true_output_raw), (1, 3, 3, 1)) + expected_output = reshape(transpose(expected_output_2d), (1, 3, 3, 1)) p = Conv2d(filter, bias, stride) - @testset "Numerical Input, Numerical Layer Parameters" begin - evaluated_output = MIPVerify.conv2d(input, p) - @test evaluated_output == true_output - end - @testset "Numerical Input, Variable Layer Parameters" begin - m = TestHelpers.get_new_model() - filter_v = map(_ -> @variable(m), CartesianIndices(filter_size)) - bias_v = map(_ -> @variable(m), CartesianIndices(bias_size)) - p_v = Conv2d(filter_v, bias_v, stride) - output_v = MIPVerify.conv2d(input, p_v) - @constraint(m, output_v .== true_output) - solve(m) - - p_solve = MIPVerify.Conv2d(getvalue(filter_v), getvalue(bias_v), stride) - solve_output = MIPVerify.conv2d(input, p_solve) - @test solve_output≈true_output - end - @testset "Variable Input, Numerical Layer Parameters" begin - m = TestHelpers.get_new_model() - input_v = map(_ -> @variable(m), CartesianIndices(input_size)) - output_v = MIPVerify.conv2d(input_v, p) - @constraint(m, output_v .== true_output) - solve(m) - - solve_output = MIPVerify.conv2d(getvalue(input_v), p) - @test solve_output≈true_output - end + test_convolution_layer(p, input, expected_output) end @testset "conv2d with stride 2, odd input shape with even filter shape" begin @@ -165,42 +204,401 @@ using MIPVerify: check_size, increment! 
input = reshape(collect(1:prod(input_size)), input_size) .- 25 filter_size = (4, 4, 2, 1) filter = reshape(collect(1:prod(filter_size)), filter_size) .- 16 - bias_size = (1, ) bias = [1] stride = 2 - true_output_raw = [ + expected_output_2d = [ 1756 2511 1310; 3065 4097 1969; 1017 1225 501 ; ] - true_output = reshape(transpose(true_output_raw), (1, 3, 3, 1)) + expected_output = reshape(transpose(expected_output_2d), (1, 3, 3, 1)) p = Conv2d(filter, bias, stride) - @testset "Numerical Input, Numerical Layer Parameters" begin - evaluated_output = MIPVerify.conv2d(input, p) - @test evaluated_output == true_output + test_convolution_layer(p, input, expected_output) + end + + @testset "conv2d with 'valid' padding" begin + @testset "conv2d with 'valid' padding, odd input and filter size, stride = 1" begin + expected_output_2d = [ + 63 72 81; + 108 117 126; + 153 162 171 + ] + test_convolution_layer_with_default_values( + (1, 5, 5, 1), + (3, 3, 1, 1), + transpose(expected_output_2d), + 1, + ValidPadding() + ) + end + + @testset "conv2d with 'valid' padding, odd input and filter size, stride = 1, channels != 1" begin + expected_output_2d = [ + 351 369 387; + 441 459 477; + 531 549 567 + ] + test_convolution_layer_with_default_values( + (1, 5, 5, 2), + (3, 3, 2, 1), + transpose(expected_output_2d), + 1, + ValidPadding() + ) + end + + @testset "conv2d with 'valid' padding, stride = 1, input width != input height" begin + expected_output_2d = [ + 63 72 81; + 108 117 126; + 153 162 171; + 198 207 216 + ] + test_convolution_layer_with_default_values( + (1, 5, 6, 1), + (3, 3, 1, 1), + transpose(expected_output_2d), + 1, + ValidPadding() + ) + end + + @testset "conv2d with 'valid' padding, stride = 1, filter width != filter height" begin + expected_output_2d = [ + 39 45 51 57; + 69 75 81 87; + 99 105 111 117 + ] + test_convolution_layer_with_default_values( + (1, 5, 5, 1), + (2, 3, 1, 1), + transpose(expected_output_2d), + 1, + ValidPadding() + ) + end + + @testset "conv2d with 
'valid' padding, odd input and filter size, stride != 1" begin + expected_output_2d = [ + 63 81; + 153 171 + ] + test_convolution_layer_with_default_values( + (1, 5, 5, 1), + (3, 3, 1, 1), + transpose(expected_output_2d), + 2, + ValidPadding() + ) + end + + @testset "conv2d with 'valid' padding, odd input size, even filter size, stride = 1" begin + expected_output_2d = [ + 16 20 24 28; + 36 40 44 48; + 56 60 64 68; + 76 80 84 88 + ] + test_convolution_layer_with_default_values( + (1, 5, 5, 1), + (2, 2, 1, 1), + transpose(expected_output_2d), + 1, + ValidPadding() + ) + end + + @testset "conv2d with 'valid' padding, odd input size, even filter size, stride != 1" begin + expected_output_2d = [ + 16 24; + 56 64 + ] + test_convolution_layer_with_default_values( + (1, 5, 5, 1), + (2, 2, 1, 1), + transpose(expected_output_2d), + 2, + ValidPadding() + ) + end + + @testset "conv2d with 'valid' padding, even input size, odd filter size, stride = 1" begin + expected_output_2d = [ + 72 81 90 99; + 126 135 144 153; + 180 189 198 207; + 234 243 252 261 + ] + test_convolution_layer_with_default_values( + (1, 6, 6, 1), + (3, 3, 1, 1), + transpose(expected_output_2d), + 1, + ValidPadding() + ) + end + + @testset "conv2d with 'valid' padding, even input size, odd filter size, stride != 1" begin + expected_output_2d = [ + 72 90; + 180 198 + ] + test_convolution_layer_with_default_values( + (1, 6, 6, 1), + (3, 3, 1, 1), + transpose(expected_output_2d), + 2, + ValidPadding() + ) + end + + @testset "conv2d with 'valid' padding, even input and filter size, stride = 1" begin + expected_output_2d = [ + 18 22 26 30 34; + 42 46 50 54 58; + 66 70 74 78 82; + 90 94 98 102 106; + 114 118 122 126 130 + ] + test_convolution_layer_with_default_values( + (1, 6, 6, 1), + (2, 2, 1, 1), + transpose(expected_output_2d), + 1, + ValidPadding() + ) + end + + @testset "conv2d with 'valid' padding, even input and filter size, stride != 1" begin + expected_output_2d = [ + 18 30; + 90 102 + ] + 
test_convolution_layer_with_default_values( + (1, 6, 6, 1), + (2, 2, 1, 1), + transpose(expected_output_2d), + 3, + ValidPadding() + ) + end + end + + @testset "conv2d wit fixed padding" begin + @testset "conv2d with (0, 0) padding, stride = 1" begin + expected_output_2d = [ + 63 72 81; + 108 117 126; + 153 162 171 + ] + test_convolution_layer_with_default_values( + (1, 5, 5, 1), + (3, 3, 1, 1), + transpose(expected_output_2d), + 1, + (0, 0) + ) + end + + @testset "conv2d with (0, 0) padding, stride != 1" begin + expected_output_2d = [ + 63 81; + 153 171; + ] + test_convolution_layer_with_default_values( + (1, 5, 5, 1), + (3, 3, 1, 1), + transpose(expected_output_2d), + 2, + (0, 0) + ) + end + + @testset "conv2d with (1, 1) padding, stride = 1" begin + expected_output_2d = [ + 16 27 33 39 28; + 39 63 72 81 57; + 69 108 117 126 87; + 99 153 162 171 117; + 76 117 123 129 88 + ] + test_convolution_layer_with_default_values( + (1, 5, 5, 1), + (3, 3, 1, 1), + transpose(expected_output_2d), + 1, + (1, 1) + ) + end + + @testset "conv2d with (1, 1) padding, stride != 1" begin + expected_output_2d = [ + 16 33 28; + 69 117 87; + 76 123 88 + ] + test_convolution_layer_with_default_values( + (1, 5, 5, 1), + (3, 3, 1, 1), + transpose(expected_output_2d), + 2, + (1, 1) + ) + end + + @testset "conv2d with 1 padding, stride = 1" begin + expected_output_2d = [ + 16 27 33 39 28; + 39 63 72 81 57; + 69 108 117 126 87; + 99 153 162 171 117; + 76 117 123 129 88 + ] + test_convolution_layer_with_default_values( + (1, 5, 5, 1), + (3, 3, 1, 1), + transpose(expected_output_2d), + 1, + 1 + ) + end + + @testset "conv2d with (1, 1) padding, input width != input_height, stride = 1" begin + expected_output_2d = [ + 18 30 36 42 48 34; + 45 72 81 90 99 69; + 81 126 135 144 153 105; + 117 180 189 198 207 141; + 90 138 144 150 156 106; + ] + test_convolution_layer_with_default_values( + (1, 6, 5, 1), + (3, 3, 1, 1), + transpose(expected_output_2d), + 1, + (1, 1) + ) + end + + @testset "conv2d with 
(1, 1) padding, input width != input_height, stride != 1" begin + expected_output_2d = [ + 18 36 48; + 81 135 153; + 90 144 156 + ] + test_convolution_layer_with_default_values( + (1, 6, 5, 1), + (3, 3, 1, 1), + transpose(expected_output_2d), + 2, + (1, 1) + ) + end + + @testset "conv2d with (1, 2) padding, stride = 1" begin + expected_output_2d = [ + 3 6 9 12 9; + 16 27 33 39 28; + 39 63 72 81 57; + 69 108 117 126 87; + 99 153 162 171 117; + 76 117 123 129 88; + 43 66 69 72 49 + ] + test_convolution_layer_with_default_values( + (1, 5, 5, 1), + (3, 3, 1, 1), + transpose(expected_output_2d), + 1, + (1, 2) + ) + end + + @testset "conv2d with (1, 2) padding, stride != 1" begin + expected_output_2d = [ + 3 9 9; + 39 72 57; + 99 162 117; + 43 69 49 + ] + test_convolution_layer_with_default_values( + (1, 5, 5, 1), + (3, 3, 1, 1), + transpose(expected_output_2d), + 2, + (1, 2) + ) + end + + @testset "conv2d with (1, 1) padding, channels != 1, stride = 1" begin + expected_output_2d = [ + 132 204 216 228 156; + 228 351 369 387 264; + 288 441 459 477 324; + 348 531 549 567 384; + 252 384 396 408 276 + ] + test_convolution_layer_with_default_values( + (1, 5, 5, 2), + (3, 3, 2, 1), + transpose(expected_output_2d), + 1, + (1, 1) + ) + end + + @testset "conv2d with (1, 1) padding, channels != 1, stride != 1" begin + expected_output_2d = [ + 132 216 156; + 288 459 324; + 252 396 276 + ] + test_convolution_layer_with_default_values( + (1, 5, 5, 2), + (3, 3, 2, 1), + transpose(expected_output_2d), + 2, + (1, 1) + ) + end + + @testset "conv2d with (1, 1, 1, 1) padding, stride = 1" begin + expected_output_2d = [ + 16 27 33 39 28; + 39 63 72 81 57; + 69 108 117 126 87; + 99 153 162 171 117; + 76 117 123 129 88 + ] + test_convolution_layer_with_default_values( + (1, 5, 5, 1), + (3, 3, 1, 1), + transpose(expected_output_2d), + 1, + (1, 1, 1, 1) + ) end - @testset "Numerical Input, Variable Layer Parameters" begin - m = TestHelpers.get_new_model() - filter_v = map(_ -> @variable(m), 
CartesianIndices(filter_size)) - bias_v = map(_ -> @variable(m), CartesianIndices(bias_size)) - p_v = Conv2d(filter_v, bias_v, stride) - output_v = MIPVerify.conv2d(input, p_v) - @constraint(m, output_v .== true_output) - solve(m) - - p_solve = MIPVerify.Conv2d(getvalue(filter_v), getvalue(bias_v), stride) - solve_output = MIPVerify.conv2d(input, p_solve) - @test solve_output≈true_output - end - @testset "Variable Input, Numerical Layer Parameters" begin - m = TestHelpers.get_new_model() - input_v = map(_ -> @variable(m), CartesianIndices(input_size)) - output_v = MIPVerify.conv2d(input_v, p) - @constraint(m, output_v .== true_output) - solve(m) - solve_output = MIPVerify.conv2d(getvalue(input_v), p) - @test solve_output≈true_output + @testset "conv2d with (1, 2, 3, 4) padding, stride = 1" begin + expected_output_2d = [ + 0 0 0 0 0 0; + 3 6 9 12 9 5; + 16 27 33 39 28 15; + 39 63 72 81 57 30; + 69 108 117 126 87 45; + 99 153 162 171 117 60; + 76 117 123 129 88 45; + 43 66 69 72 49 25; + 0 0 0 0 0 0; + 0 0 0 0 0 0 + ] + test_convolution_layer_with_default_values( + (1, 5, 5, 1), + (3, 3, 1, 1), + transpose(expected_output_2d), + 1, + (1, 2, 3, 4) + ) end end end \ No newline at end of file