[Paddle Inference] support ernie quant model with interleaved #39424

Merged
merged 7 commits, Feb 11, 2022
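For context, a minimal sketch of how this code path is typically exercised from the Paddle Inference C++ API: a quantized ERNIE model run through an int8 TensorRT subgraph engine with the OSS/varseqlen plugin path enabled, which is the configuration the fuse-pass and converter changes below feed into. Option values, shape ranges, and input names here are assumptions based on the 2.x AnalysisConfig API, not part of this PR.

#include <map>
#include <string>
#include <vector>

#include "paddle_inference_api.h"

int main() {
  paddle_infer::Config config;
  config.SetModel("ernie_quant/model.pdmodel", "ernie_quant/model.pdiparams");
  config.EnableUseGpu(/*memory_pool_init_size_mb=*/500, /*device_id=*/0);

  // Int8 TensorRT subgraph engine; calibration is disabled because the
  // quant-aware-trained model already carries out_threshold scales.
  config.EnableTensorRtEngine(/*workspace_size=*/1 << 30,
                              /*max_batch_size=*/1,
                              /*min_subgraph_size=*/5,
                              paddle_infer::PrecisionType::kInt8,
                              /*use_static=*/false,
                              /*use_calib_mode=*/false);
  // Enables the varseqlen/OSS plugin path that the interleaved format builds on.
  config.EnableTensorRtOSS();

  // Dynamic shape ranges for the ERNIE inputs (names are placeholders).
  std::map<std::string, std::vector<int>> min_shape{{"input_ids", {1, 1}}};
  std::map<std::string, std::vector<int>> max_shape{{"input_ids", {8, 128}}};
  std::map<std::string, std::vector<int>> opt_shape{{"input_ids", {4, 64}}};
  config.SetTRTDynamicShapeInfo(min_shape, max_shape, opt_shape);

  auto predictor = paddle_infer::CreatePredictor(config);
  (void)predictor;  // SetInputHandle / Run / GetOutputHandle as usual
  return 0;
}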
@@ -334,6 +334,9 @@ int EmbeddingEltwiseLayerNormFusePass::BuildFusion(

if (end_patter_layernorms[k]->Op()->HasAttr("out_threshold")) {
new_op_desc.SetAttr("enable_int8", true);
new_op_desc.SetAttr(
"out_threshold",
end_patter_layernorms[k]->Op()->GetAttr("out_threshold"));
}

auto* embedding_eltwise_layernorm = graph->CreateOpNode(&new_op_desc);
14 changes: 10 additions & 4 deletions paddle/fluid/framework/ir/multihead_matmul_fuse_pass.cc
@@ -501,7 +501,6 @@ PDNode* MultiHeadMatmulV3Pattern::operator()() {
auto* reshape2_qkv_out_var = pattern->NewNode(reshape2_qkv_out_repr())
->assert_is_op_output("reshape2");
reshape2_qkv_out_var->assert_is_ops_input(matmul_ops);

// Second path to matmul
auto* mul1 = pattern->NewNode(mul1_repr())->assert_is_ops(matmul_ops);
auto* mul1_w_var = pattern->NewNode(mul1_w_repr())
@@ -671,6 +670,7 @@ MultiHeadMatmulV2FusePass::MultiHeadMatmulV2FusePass() {
.IsTensor()
.End()
.AddOutput("XShape")
.IsOptional()
.IsTensor()
.End()
.AddAttr("shape") // -->(B, S, H, N) <--(B, S, N*H)
@@ -687,6 +687,7 @@ MultiHeadMatmulV2FusePass::MultiHeadMatmulV2FusePass() {
.IsTensor()
.End()
.AddOutput("XShape")
.IsOptional()
.IsTensor()
.End()
.AddAttr("axis") // {0, 2, 1, 3}
@@ -761,7 +762,7 @@ int MultiHeadMatmulV2FusePass::BuildFusionV2(Graph* graph,
Node* eltadd0_b, Node* eltadd1_b, Node* eltadd2_b, Node* eltadd_qk_b,
Node* reshape2, Node* reshape2_qkv_out, Node* scale, Node* scale_out,
Node* softmax_qk, Node* eltadd0, Node* eltadd1, Node* eltadd2,
- Node* matmul_qk) {
+ Node* matmul_qk, Node* reshape2_qkv) {
auto scale_attr = BOOST_GET_CONST(float, scale->Op()->GetAttr("scale"));

// mul (B * S * Hidden) x (Hidden * 3 * N * H) = (B * S * 3 * N * H)
@@ -905,7 +906,10 @@ int MultiHeadMatmulV2FusePass::BuildFusionV2(Graph* graph,
multihead_op_desc.SetAttr("dp_probs", qkv_plugin_scale);
}
}

if (reshape2_qkv->Op()->HasAttr("out_threshold")) {
multihead_op_desc.SetAttr("out_threshold",
reshape2_qkv->Op()->GetAttr("out_threshold"));
}
auto* multihead = graph->CreateOpNode(&multihead_op_desc);

IR_NODE_LINK_TO(input0, multihead);
@@ -1008,7 +1012,7 @@ int MultiHeadMatmulV2FusePass::BuildFusionV2(Graph* graph,
fuse_creater(input0, mul0, mul1, mul2, mul0_out, mul1_out, mul2_out, mul0_w,
mul1_w, mul2_w, eltadd0_b, eltadd1_b, eltadd2_b, eltadd_qk_b,
reshape2_0, reshape2_qkv_out, scale, scale_out, softmax_qk,
- eltadd0, eltadd1, eltadd2, matmul_qk);
+ eltadd0, eltadd1, eltadd2, matmul_qk, reshape2_qkv);

std::unordered_set<const Node*> marked_nodes({eltadd0,
eltadd1,
@@ -1130,6 +1134,7 @@ MultiHeadMatmulV3FusePass::MultiHeadMatmulV3FusePass() {
.IsTensor()
.End()
.AddOutput("XShape")
.IsOptional()
.IsTensor()
.End()
.AddAttr("shape") // -->(B, S, H, N) <--(B, S, N*H)
@@ -1146,6 +1151,7 @@ MultiHeadMatmulV3FusePass::MultiHeadMatmulV3FusePass() {
.IsTensor()
.End()
.AddOutput("XShape")
.IsOptional()
.IsTensor()
.End()
.AddAttr("axis") // {0, 2, 1, 3}
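The new .IsOptional() entries above relax the op-compat check for reshape2 and transpose2: the XShape output only exists to support the backward pass, and exported or quantized inference programs may have it pruned, in which case a mandatory XShape would make the compat check fail and the multihead fusion would be silently skipped. A trimmed sketch of the relaxed compat entry, assuming the standard OpCompat builder used throughout these passes (attributes abbreviated):

// Sketch only: XShape is declared optional so pruned inference programs
// still pass the op-compat check and the fusion can apply.
AddOpCompat(OpCompat("reshape2"))
    .AddInput("X")
    .IsTensor()
    .End()
    .AddOutput("Out")
    .IsTensor()
    .End()
    .AddOutput("XShape")
    .IsOptional()  // may be absent in pruned inference programs
    .IsTensor()
    .End()
    .AddAttr("shape")  // -->(B, S, H, N) <--(B, S, N*H)
    .IsType<std::vector<int>>()
    .End();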
4 changes: 3 additions & 1 deletion paddle/fluid/framework/ir/skip_layernorm_fuse_pass.cc
@@ -158,8 +158,10 @@ void SkipLayerNormFusePass::ApplyImpl(ir::Graph *graph) const {
new_desc.SetInput("Scale", {layer_norm_scale->Name()});
new_desc.SetInput("Bias", {layer_norm_bias->Name()});

- if (elementwise->Op()->HasAttr("out_threshold")) {
+ if (layer_norm->Op()->HasAttr("out_threshold")) {
new_desc.SetAttr("enable_int8", true);
new_desc.SetAttr("out_threshold",
layer_norm->Op()->GetAttr("out_threshold"));
}

// outputs
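Taken together, the three fuse-pass changes follow one pattern: when the op that produces the fused subgraph's output carries an out_threshold attribute (the output quantization scale recorded by the quantization tools), the pass copies it onto the fused op and sets enable_int8, so the TensorRT converter can register that scale as the tensor's dynamic range. A condensed sketch of the two halves; the converter half mirrors the emb_eltwise_layernorm converter shown below, and the names are illustrative rather than a verbatim excerpt:

// Pass side: propagate the quant scale from the op whose output the
// fused op replaces (here the layer_norm) onto the fused op's OpDesc.
if (layer_norm->Op()->HasAttr("out_threshold")) {
  new_desc.SetAttr("enable_int8", true);
  new_desc.SetAttr("out_threshold", layer_norm->Op()->GetAttr("out_threshold"));
}

// Converter side: consume the scale so TensorRT knows the int8 range
// of the fused layer's output.
if (op_desc.HasAttr("enable_int8")) {
  float out_scale = BOOST_GET_CONST(float, op_desc.GetAttr("out_threshold"));
  engine_->SetTensorDynamicRange(layer->getOutput(0), out_scale);
}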
44 changes: 38 additions & 6 deletions paddle/fluid/inference/tensorrt/convert/emb_eltwise_layernorm.cc
@@ -142,7 +142,6 @@ class EmbEltwiseLayerNormOpConverter : public OpConverter {
{"output_fp16", &output_fp16, nvinfer1::PluginFieldType::kINT32, 1},
};

// remember to free
nvinfer1::PluginFieldCollection* plugin_ptr =
static_cast<nvinfer1::PluginFieldCollection*>(
malloc(sizeof(*plugin_ptr) +
@@ -168,6 +167,11 @@ class EmbEltwiseLayerNormOpConverter : public OpConverter {
shape_dim.nbDims = 1;
shape_dim.d[0] = -1;
shuffle_layer->setReshapeDimensions(shape_dim);
shuffle_layer->setName(
("Embeltwise_Shuffle_reshape (Output: max_seqlen " +
op_desc.Output("Out")[0] + ")")
.c_str());
engine_->SetTensorDynamicRange(shuffle_layer->getOutput(0), 1.0f);
plugin_inputs.emplace_back(
shuffle_layer->getOutput(0)); // max_seqlen, eval_placeholder_3

@@ -178,12 +182,40 @@
creator->createPlugin("CustomEmbLayerNormPluginDynamic", plugin_ptr);
auto plugin_layer = engine_->network()->addPluginV2(
plugin_inputs.data(), plugin_inputs.size(), *plugin_obj);
layer = plugin_layer;
plugin_layer->setName(("CustomEmbLayerNormPluginDynamic_V2(Output: " +
op_desc.Output("Out")[0] + ")")
.c_str());
free(plugin_ptr);
auto output_name = op_desc.Output("Out")[0];
RreplenishLayerAndOutput(layer, "emb_eltwise_layernorm",
{output_name, std::string("qkv_plugin_mask")},
test_mode);
if (enable_int8) {
float out_scale =
BOOST_GET_CONST(float, op_desc.GetAttr("out_threshold"));
engine_->SetTensorDynamicRange(plugin_layer->getOutput(0), out_scale);
engine_->SetTensorDynamicRange(plugin_layer->getOutput(1), out_scale);
}
if (engine_->with_interleaved()) {
VLOG(4)
<< "fused emb_eltwise_layernorm op: use_oss and with_interleaved";
if (!enable_int8) {
PADDLE_THROW(
platform::errors::Fatal("use with_interleaved must be int8."));
}
auto* shuffler_embed = TRT_ENGINE_ADD_LAYER(
engine_, Shuffle, *(plugin_layer->getOutput(0)));
nvinfer1::Permutation transpose_embed{2, 1, 0, 3};
shuffler_embed->setSecondTranspose(transpose_embed);
engine_->SetITensor(op_desc.Output("Out")[0],
shuffler_embed->getOutput(0));
shuffler_embed->setName(
("Emb_eltwise_out_shuffler_transpose (Output: " +
op_desc.Output("Out")[0] + ")")
.c_str());
} else {
layer = plugin_layer;
auto output_name = op_desc.Output("Out")[0];
RreplenishLayerAndOutput(layer, "CustomEmbLayerNormPluginDynamic_V2",
{output_name, std::string("qkv_plugin_mask")},
test_mode);
}
} else {
bool with_fp16 =
engine_->WithFp16() && !engine_->disable_trt_plugin_fp16();