Commit c78bb2b: pnnx conversion

Adds pnnx-to-ncnn graph rewriter passes that convert F.conv_transpose2d with a dynamically supplied weight (and optional bias) into ncnn Deconvolution / DeconvolutionDepthWise layers, and fixes the element size of the transposed-weight buffer in the x86 DeconvolutionDepthWise dynamic-weight path.

nihui committed Oct 31, 2023
1 parent 1623ec9 commit c78bb2b

Showing 3 changed files with 223 additions and 8 deletions.
2 changes: 1 addition & 1 deletion src/layer/x86/deconvolutiondepthwise_x86.cpp
@@ -601,7 +601,7 @@ int DeconvolutionDepthWise_x86::forward(const std::vector<Mat>& bottom_blobs, st
         // transpose group-inch/group-outch/group-kh-kw to group-outch/group-inch/group-kh-kw
         Mat weight_data_transposed;
         {
-            weight_data_transposed.create(_kernel_w * _kernel_h * _num_output * _num_input / group, weight_data_flattened.elemsize, opt.workspace_allocator);
+            weight_data_transposed.create(_kernel_w * _kernel_h * _num_output * _num_input / group, 4u, opt.workspace_allocator);
             if (weight_data_transposed.empty())
                 return -100;

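The one-line change above sizes the transposed buffer with explicit 4-byte (fp32) elements rather than inheriting weight_data_flattened.elemsize. For orientation, here is a minimal standalone sketch of the group-wise transpose that this buffer receives. It is a hypothetical helper for illustration, not the ncnn implementation; the name transpose_deconv_weight and the flat std::vector layout are assumptions:

#include <cstddef>
#include <vector>

// Reorder a flattened deconvolution weight from
// [group][inch/group][outch/group][kh*kw] to [group][outch/group][inch/group][kh*kw].
static std::vector<float> transpose_deconv_weight(const std::vector<float>& w,
                                                  int group, int inch_g, int outch_g, int ksize)
{
    std::vector<float> out(w.size());
    for (int g = 0; g < group; g++)
    {
        for (int i = 0; i < inch_g; i++)
        {
            for (int o = 0; o < outch_g; o++)
            {
                for (int k = 0; k < ksize; k++)
                {
                    // source index: group-major, then inch, then outch, then kernel
                    const size_t src = (((size_t)g * inch_g + i) * outch_g + o) * (size_t)ksize + k;
                    // destination index: group-major, then outch, then inch, then kernel
                    const size_t dst = (((size_t)g * outch_g + o) * inch_g + i) * (size_t)ksize + k;
                    out[dst] = w[src]; // swap the inch/outch axes within each group
                }
            }
        }
    }
    return out;
}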
208 changes: 208 additions & 0 deletions tools/pnnx/src/pass_ncnn/F_conv_transpose2d.cpp
@@ -18,6 +18,214 @@ namespace pnnx {

namespace ncnn {

class F_conv_transpose2d_4 : public GraphRewriterPass
{
public:
    const char* match_pattern_graph() const
    {
        return R"PNNXIR(7767517
4 3
pnnx.Input              input       0 1 input
pnnx.Input              weight      0 1 weight
F.conv_transpose2d      op_0        2 1 input weight out bias=None stride=%stride output_padding=%output_padding padding=%padding dilation=%dilation groups=1
pnnx.Output             output      1 0 out
)PNNXIR";
    }

    const char* type_str() const
    {
        return "Deconvolution";
    }

    const char* name_str() const
    {
        return "deconv2d";
    }

    void write(Operator* op, const std::map<std::string, Parameter>& captured_params, const std::map<std::string, Attribute>& /*captured_attrs*/) const
    {
        std::vector<int> weight_shape = op->inputs[1]->shape;
        if (weight_shape.empty())
        {
            weight_shape = {0, 0, 0, 0};
        }

        // conv_transpose2d weight is [inch, outch, kh, kw];
        // torch tuples are (h, w) while ncnn splits each into w/h param ids
        op->params["0"] = weight_shape[1];                             // num_output
        op->params["1"] = weight_shape[3];                             // kernel_w
        op->params["11"] = weight_shape[2];                            // kernel_h
        op->params["2"] = captured_params.at("dilation").ai[1];        // dilation_w
        op->params["12"] = captured_params.at("dilation").ai[0];       // dilation_h
        op->params["3"] = captured_params.at("stride").ai[1];          // stride_w
        op->params["13"] = captured_params.at("stride").ai[0];         // stride_h
        op->params["4"] = captured_params.at("padding").ai[1];         // pad_left
        op->params["14"] = captured_params.at("padding").ai[0];        // pad_top
        op->params["18"] = captured_params.at("output_padding").ai[1]; // output_pad_right
        op->params["19"] = captured_params.at("output_padding").ai[0]; // output_pad_bottom
        op->params["5"] = 0;                                           // bias_term
        op->params["6"] = (int)(weight_shape[0] * weight_shape[1] * weight_shape[2] * weight_shape[3]); // weight_data_size
        op->params["28"] = 1; // dynamic weight
    }
};

REGISTER_GLOBAL_PNNX_NCNN_GRAPH_REWRITER_PASS(F_conv_transpose2d_4, 22)
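As a worked example of the mapping (illustrative; blob names depend on the surrounding graph): for a dynamic weight of shape [12, 16, 3, 3] with stride=(2,2), padding=(1,1), output_padding=(1,1) and dilation=(1,1), the shapes used in the updated test below, the pass would emit roughly this ncnn layer line:

Deconvolution            deconv2d    2 1 input weight out 0=16 1=3 11=3 2=1 12=1 3=2 13=2 4=1 14=1 18=1 19=1 5=0 6=1728 28=1

where 6=1728 is 12*16*3*3.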

class F_conv_transpose2d_5 : public GraphRewriterPass
{
public:
    const char* match_pattern_graph() const
    {
        return R"PNNXIR(7767517
5 4
pnnx.Input              input       0 1 input
pnnx.Input              weight      0 1 weight
pnnx.Input              bias        0 1 bias
F.conv_transpose2d      op_0        3 1 input weight bias out stride=%stride output_padding=%output_padding padding=%padding dilation=%dilation groups=1
pnnx.Output             output      1 0 out
)PNNXIR";
    }

    const char* type_str() const
    {
        return "Deconvolution";
    }

    const char* name_str() const
    {
        return "deconv2d";
    }

    void write(Operator* op, const std::map<std::string, Parameter>& captured_params, const std::map<std::string, Attribute>& /*captured_attrs*/) const
    {
        // same mapping as F_conv_transpose2d_4, except the bias term is
        // enabled and arrives as a third input blob
        std::vector<int> weight_shape = op->inputs[1]->shape;
        if (weight_shape.empty())
        {
            weight_shape = {0, 0, 0, 0};
        }

        op->params["0"] = weight_shape[1];
        op->params["1"] = weight_shape[3];
        op->params["11"] = weight_shape[2];
        op->params["2"] = captured_params.at("dilation").ai[1];
        op->params["12"] = captured_params.at("dilation").ai[0];
        op->params["3"] = captured_params.at("stride").ai[1];
        op->params["13"] = captured_params.at("stride").ai[0];
        op->params["4"] = captured_params.at("padding").ai[1];
        op->params["14"] = captured_params.at("padding").ai[0];
        op->params["18"] = captured_params.at("output_padding").ai[1];
        op->params["19"] = captured_params.at("output_padding").ai[0];
        op->params["5"] = 1; // bias_term
        op->params["6"] = (int)(weight_shape[0] * weight_shape[1] * weight_shape[2] * weight_shape[3]);
        op->params["28"] = 1; // dynamic weight
    }
};

REGISTER_GLOBAL_PNNX_NCNN_GRAPH_REWRITER_PASS(F_conv_transpose2d_5, 22)
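The biased variant differs only in consuming bias as a third input blob and setting bias_term 5=1; under the same assumed shapes as the previous example it would emit roughly:

Deconvolution            deconv2d    3 1 input weight bias out 0=16 1=3 11=3 2=1 12=1 3=2 13=2 4=1 14=1 18=1 19=1 5=1 6=1728 28=1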

class F_conv_transpose2d_6 : public GraphRewriterPass
{
public:
    const char* match_pattern_graph() const
    {
        return R"PNNXIR(7767517
4 3
pnnx.Input              input       0 1 input
pnnx.Input              weight      0 1 weight
F.conv_transpose2d      op_0        2 1 input weight out bias=None stride=%stride output_padding=%output_padding padding=%padding dilation=%dilation groups=%groups
pnnx.Output             output      1 0 out
)PNNXIR";
    }

    const char* type_str() const
    {
        return "DeconvolutionDepthWise";
    }

    const char* name_str() const
    {
        return "deconvdw2d";
    }

    void write(Operator* op, const std::map<std::string, Parameter>& captured_params, const std::map<std::string, Attribute>& /*captured_attrs*/) const
    {
        std::vector<int> weight_shape = op->inputs[1]->shape;
        if (weight_shape.empty())
        {
            weight_shape = {0, 0, 0, 0};
        }

        // grouped weight is [inch, outch/group, kh, kw], so the total
        // num_output is outch-per-group multiplied by the group count
        op->params["0"] = weight_shape[1] * captured_params.at("groups").i;
        op->params["1"] = weight_shape[3];
        op->params["11"] = weight_shape[2];
        op->params["2"] = captured_params.at("dilation").ai[1];
        op->params["12"] = captured_params.at("dilation").ai[0];
        op->params["3"] = captured_params.at("stride").ai[1];
        op->params["13"] = captured_params.at("stride").ai[0];
        op->params["4"] = captured_params.at("padding").ai[1];
        op->params["14"] = captured_params.at("padding").ai[0];
        op->params["18"] = captured_params.at("output_padding").ai[1];
        op->params["19"] = captured_params.at("output_padding").ai[0];
        op->params["5"] = 0;
        op->params["6"] = (int)(weight_shape[0] * weight_shape[1] * weight_shape[2] * weight_shape[3]);
        op->params["7"] = captured_params.at("groups"); // group
        op->params["28"] = 1; // dynamic weight
    }
};

REGISTER_GLOBAL_PNNX_NCNN_GRAPH_REWRITER_PASS(F_conv_transpose2d_6, 23)
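For the grouped form, num_output is reconstructed as outch-per-group times groups. With a hypothetical weight of shape [16, 8, 5, 5], groups=2 (so 0=8*2=16), stride=(1,2), padding=(2,1), dilation=(2,1) and default output_padding=(0,0), the emitted line would look roughly like:

DeconvolutionDepthWise   deconvdw2d  2 1 input weight out 0=16 1=5 11=5 2=1 12=2 3=2 13=1 4=1 14=2 18=0 19=0 5=0 6=3200 7=2 28=1

Note the swapped w/h order: torch's (h, w) tuples land in the *_w ids via ai[1] and in the *_h ids via ai[0].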

class F_conv_transpose2d_7 : public GraphRewriterPass
{
public:
    const char* match_pattern_graph() const
    {
        return R"PNNXIR(7767517
5 4
pnnx.Input              input       0 1 input
pnnx.Input              weight      0 1 weight
pnnx.Input              bias        0 1 bias
F.conv_transpose2d      op_0        3 1 input weight bias out stride=%stride output_padding=%output_padding padding=%padding dilation=%dilation groups=%groups
pnnx.Output             output      1 0 out
)PNNXIR";
    }

    const char* type_str() const
    {
        return "DeconvolutionDepthWise";
    }

    const char* name_str() const
    {
        return "deconvdw2d";
    }

    void write(Operator* op, const std::map<std::string, Parameter>& captured_params, const std::map<std::string, Attribute>& /*captured_attrs*/) const
    {
        // same mapping as F_conv_transpose2d_6, except the bias term is
        // enabled and arrives as a third input blob
        std::vector<int> weight_shape = op->inputs[1]->shape;
        if (weight_shape.empty())
        {
            weight_shape = {0, 0, 0, 0};
        }

        op->params["0"] = weight_shape[1] * captured_params.at("groups").i;
        op->params["1"] = weight_shape[3];
        op->params["11"] = weight_shape[2];
        op->params["2"] = captured_params.at("dilation").ai[1];
        op->params["12"] = captured_params.at("dilation").ai[0];
        op->params["3"] = captured_params.at("stride").ai[1];
        op->params["13"] = captured_params.at("stride").ai[0];
        op->params["4"] = captured_params.at("padding").ai[1];
        op->params["14"] = captured_params.at("padding").ai[0];
        op->params["18"] = captured_params.at("output_padding").ai[1];
        op->params["19"] = captured_params.at("output_padding").ai[0];
        op->params["5"] = 1; // bias_term
        op->params["6"] = (int)(weight_shape[0] * weight_shape[1] * weight_shape[2] * weight_shape[3]);
        op->params["7"] = captured_params.at("groups"); // group
        op->params["28"] = 1; // dynamic weight
    }
};

REGISTER_GLOBAL_PNNX_NCNN_GRAPH_REWRITER_PASS(F_conv_transpose2d_7, 23)
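This last pattern is the one exercised by the w1/b1 pair in the updated test below (weight [16, 8, 5, 5], bias [16], groups=2, stride=(1,2), padding=(2,1), dilation=(2,1)); it would produce roughly:

DeconvolutionDepthWise   deconvdw2d  3 1 input weight bias out 0=16 1=5 11=5 2=1 12=2 3=2 13=1 4=1 14=2 18=0 19=0 5=1 6=3200 7=2 28=1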

} // namespace ncnn

} // namespace pnnx
21 changes: 14 additions & 7 deletions tools/pnnx/tests/ncnn/test_F_conv_transpose2d.py
@@ -24,33 +24,40 @@ def __init__(self):
         self.b2 = nn.Parameter(torch.rand(12))
         self.w3 = nn.Parameter(torch.rand(12, 2, 3, 3))

-    def forward(self, y):
+    def forward(self, x, w0, w1, b1, y):
+        x = F.conv_transpose2d(x, w0, None, stride=(2,2), padding=(1,1), output_padding=(1,1))
+        x = F.conv_transpose2d(x, w1, b1, stride=(1,2), padding=(2,1), dilation=(2,1), groups=2)
+
         y = F.conv_transpose2d(y, self.w2, self.b2, stride=(2,2), padding=(1,1), output_padding=(1,1))
         y = F.conv_transpose2d(y, self.w3, None, stride=(1,2), padding=(2,1), dilation=(2,1), groups=3)
-        return y
+        return x, y

 def test():
     net = Model().half().float()
     net.eval()

     torch.manual_seed(0)
+    x = torch.rand(1, 12, 22, 32)
+    w0 = torch.rand(12, 16, 3, 3)
+    w1 = torch.rand(16, 8, 5, 5)
+    b1 = torch.rand(16)
     y = torch.rand(1, 6, 5, 6)

-    a = net(y)
+    a0, a1 = net(x, w0, w1, b1, y)

     # export torchscript
-    mod = torch.jit.trace(net, y)
+    mod = torch.jit.trace(net, (x, w0, w1, b1, y))
     mod.save("test_F_conv_transpose2d.pt")

     # torchscript to pnnx
     import os
-    os.system("../../src/pnnx test_F_conv_transpose2d.pt inputshape=[1,6,5,6]")
+    os.system("../../src/pnnx test_F_conv_transpose2d.pt inputshape=[1,12,22,32],[12,16,3,3],[16,8,5,5],[16],[1,6,5,6]")

     # ncnn inference
     import test_F_conv_transpose2d_ncnn
-    b = test_F_conv_transpose2d_ncnn.test_inference()
+    b0, b1 = test_F_conv_transpose2d_ncnn.test_inference()

-    return torch.allclose(a, b, 1e-4, 1e-4)
+    return torch.allclose(a0, b0, 1e-4, 1e-4) and torch.allclose(a1, b1, 1e-4, 1e-4)

 if __name__ == "__main__":
     if test():
