OpConverter change BlockDesc to proto::BlockDesc #10623

Merged: 3 commits, May 14, 2018
2 changes: 1 addition & 1 deletion paddle/fluid/inference/tensorrt/convert/CMakeLists.txt
@@ -1,4 +1,4 @@
-nv_test(test_op_converter SRCS test_op_converter.cc mul_op.cc conv2d_op.cc DEPS ${FLUID_CORE_MODULES})
+nv_test(test_op_converter SRCS test_op_converter.cc mul_op.cc conv2d_op.cc op_converter.h DEPS ${FLUID_CORE_MODULES})
Contributor comment:
There's no need to add op_converter.h in the cmake.

nv_test(test_trt_activation_op SRCS test_activation_op.cc activation_op.cc
DEPS ${FLUID_CORE_MODULES} activation_op tensorrt_engine)
nv_test(test_io_converter SRCS test_io_converter.cc io_converter.cc DEPS dynload_cuda dynamic_loader lod_tensor)
9 changes: 6 additions & 3 deletions paddle/fluid/inference/tensorrt/convert/activation_op.cc
@@ -21,15 +21,18 @@ namespace tensorrt {
class ReluOpConverter : public OpConverter {
public:
ReluOpConverter() {}
-void operator()(const framework::OpDesc& op) override {
+void operator()(const framework::proto::OpDesc& op) override {
+// Here the two nullptr looks strange, that's because the
+// framework::OpDesc's constructor is strange.
+framework::OpDesc op_desc(op, nullptr, nullptr);
LOG(INFO) << "convert a fluid relu op to tensorrt activation layer whose "
"type is Relu";
const nvinfer1::ITensor* input_tensor =
-engine_->GetITensor(op.Input("X")[0]);
+engine_->GetITensor(op_desc.Input("X")[0]);
nvinfer1::IActivationLayer* layer = TRT_ENGINE_ADD_LAYER(
engine_, Activation, *const_cast<nvinfer1::ITensor*>(input_tensor),
nvinfer1::ActivationType::kRELU);
-engine_->SetITensor(op.Output("Out")[0], layer->getOutput(0));
+engine_->SetITensor(op_desc.Output("Out")[0], layer->getOutput(0));
}
};
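Side note, not part of this PR: the pattern above, where the incoming framework::proto::OpDesc is re-wrapped in a framework::OpDesc to regain the Input()/Output() helpers, is presumably what the other converters (currently log-only stubs) would follow once they build real layers. A minimal sketch of that shape; the converter name, the "X"/"Out" slots, and the elided layer construction are placeholders:

// Sketch only: assumes the same headers and namespace as the converters in
// this PR; MyOpConverter and its input/output slot names are hypothetical.
class MyOpConverter : public OpConverter {
 public:
  void operator()(const framework::proto::OpDesc& op) override {
    // Re-wrap the proto so the Input()/Output() helpers are available again.
    framework::OpDesc op_desc(op, nullptr, nullptr);
    auto input_name = op_desc.Input("X")[0];
    auto output_name = op_desc.Output("Out")[0];
    // Build the corresponding TensorRT layer from engine_->GetITensor(input_name)
    // and register its output via engine_->SetITensor(output_name, ...).
  }
};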

2 changes: 1 addition & 1 deletion paddle/fluid/inference/tensorrt/convert/conv2d_op.cc
@@ -21,7 +21,7 @@ namespace tensorrt {
class Conv2dOpConverter : public OpConverter {
public:
Conv2dOpConverter() {}
-void operator()(const framework::OpDesc& op) override {
+void operator()(const framework::proto::OpDesc& op) override {
LOG(INFO)
<< "convert a fluid conv2d op to tensorrt conv layer without bias";
}
2 changes: 1 addition & 1 deletion paddle/fluid/inference/tensorrt/convert/mul_op.cc
@@ -21,7 +21,7 @@ namespace tensorrt {
class MulOpConverter : public OpConverter {
public:
MulOpConverter() {}
-void operator()(const framework::OpDesc& op) override {
+void operator()(const framework::proto::OpDesc& op) override {
LOG(INFO) << "convert a fluid mul op to tensorrt fc layer without bias";
}
};
16 changes: 9 additions & 7 deletions paddle/fluid/inference/tensorrt/convert/op_converter.h
@@ -31,25 +31,27 @@ namespace tensorrt {
class OpConverter {
public:
OpConverter() {}
-virtual void operator()(const framework::OpDesc& op) {}
+virtual void operator()(const framework::proto::OpDesc& op) {}

-void Run(const framework::OpDesc& op, TensorRTEngine* engine) {
-std::string type = op.Type();
+void Run(const framework::proto::OpDesc& op, TensorRTEngine* engine) {
+std::string type = op.type();
auto* it = Registry<OpConverter>::Lookup(type);
PADDLE_ENFORCE_NOT_NULL(it, "no OpConverter for optype [%s]", type);
it->SetEngine(engine);
(*it)(op);
}

// convert fluid op to tensorrt layer
-void ConvertOp(const framework::OpDesc& op, TensorRTEngine* engine) {
+void ConvertOp(const framework::proto::OpDesc& op, TensorRTEngine* engine) {
OpConverter::Run(op, engine);
}

// convert fluid block to tensorrt network
-void ConvertBlock(const framework::BlockDesc& block, TensorRTEngine* engine) {
-for (auto op : block.AllOps()) {
-OpConverter::Run(*op, engine);
+void ConvertBlock(const framework::proto::BlockDesc& block,
+                  TensorRTEngine* engine) {
+for (size_t i = 0; i < block.ops_size(); i++) {
+const auto& op = block.ops(i);
+OpConverter::Run(op, engine);
}
}
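For context, a minimal sketch, not part of this diff, of how a caller drives the proto-based entry point: a framework::BlockDesc is lowered to its protobuf form via Proto() before being passed to ConvertBlock, exactly as test_op_converter.cc does below. The wrapper function name and the pre-built engine are assumptions for illustration:

// Sketch only: assumes `engine` has been created and initialized elsewhere;
// ConvertProgramToTRT is a hypothetical helper, not part of the codebase.
#include "paddle/fluid/framework/program_desc.h"
#include "paddle/fluid/inference/tensorrt/convert/op_converter.h"

void ConvertProgramToTRT(paddle::inference::tensorrt::TensorRTEngine* engine) {
  paddle::framework::ProgramDesc program;
  auto* block = program.MutableBlock(0);  // framework::BlockDesc*
  // ... populate the block with the fluid ops to be converted ...

  paddle::inference::tensorrt::OpConverter converter;
  // ConvertBlock now takes the protobuf representation of the block.
  converter.ConvertBlock(*block->Proto(), engine);
}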

4 changes: 2 additions & 2 deletions paddle/fluid/inference/tensorrt/convert/test_activation_op.cc
@@ -49,7 +49,7 @@ void Compare(float input, float expect) {
op_desc.SetInput("X", {"X"});
op_desc.SetOutput("Out", {"Out"});

-auto relu_op = framework::OpRegistry::CreateOp(op_desc);
+auto relu_op = framework::OpRegistry::CreateOp(*op_desc.Proto());

// run fluid op
relu_op->Run(scope, place);
@@ -65,7 +65,7 @@ void Compare(float input, float expect) {
nvinfer1::DimsCHW{1, 1, 1});

OpConverter op_converter;
-op_converter.ConvertOp(op_desc, engine);
+op_converter.ConvertOp(*op_desc.Proto(), engine);

engine->DeclareOutput("Out");
engine->FreezeNetwork();
Expand Down
paddle/fluid/inference/tensorrt/convert/test_op_converter.cc
@@ -29,7 +29,7 @@ TEST(OpConverter, ConvertBlock) {
conv2d_op->SetType("conv2d");

OpConverter converter;
-converter.ConvertBlock(*block, nullptr /*TensorRTEngine*/);
+converter.ConvertBlock(*block->Proto(), nullptr /*TensorRTEngine*/);
}

} // namespace tensorrt