diff --git a/paddle/fluid/framework/attribute.h b/paddle/fluid/framework/attribute.h
index e9e1875765633..37d399b7779a7 100644
--- a/paddle/fluid/framework/attribute.h
+++ b/paddle/fluid/framework/attribute.h
@@ -322,8 +322,19 @@ class TypedAttrChecker {
   typedef std::function<void(const T&)> ValueChecker;

  public:
-  explicit TypedAttrChecker(const std::string& attr_name)
-      : attr_name_(attr_name) {}
+  explicit TypedAttrChecker(const std::string& attr_name,
+                            proto::OpProto_Attr* attr)
+      : attr_name_(attr_name), attr_(attr) {}
+
+  TypedAttrChecker& AsExtra() {
+    attr_->set_extra(true);
+    return *this;
+  }
+
+  TypedAttrChecker& AsQuant() {
+    attr_->set_quant(true);
+    return *this;
+  }

   TypedAttrChecker& InEnum(const std::unordered_set<T>& range) {
     value_checkers_.push_back(EnumInContainer<T>(range));
@@ -398,6 +409,7 @@ class TypedAttrChecker {

  private:
   std::string attr_name_;
+  proto::OpProto_Attr* attr_;
   std::vector<ValueChecker> value_checkers_;
   std::vector<ValueChecker> default_value_setter_;
 };
@@ -408,8 +420,9 @@ class OpAttrChecker {

  public:
   template <typename T>
-  TypedAttrChecker<T>& AddAttrChecker(const std::string& attr_name) {
-    attr_checkers_.push_back(TypedAttrChecker<T>(attr_name));
+  TypedAttrChecker<T>& AddAttrChecker(const std::string& attr_name,
+                                      proto::OpProto_Attr* attr) {
+    attr_checkers_.push_back(TypedAttrChecker<T>(attr_name, attr));
     AttrChecker& checker = attr_checkers_.back();
     return *(checker.target<TypedAttrChecker<T>>());
   }
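The heart of the change: each `TypedAttrChecker` now holds a pointer to the `proto::OpProto_Attr` it describes, so `AsExtra()` and `AsQuant()` chain like `SetDefault()` and flip flags on the proto entry. A minimal standalone sketch of that pattern (`AttrProto` and `AttrCheckerSketch` are illustrative stand-ins, not Paddle types):

```cpp
#include <iostream>
#include <string>

// Stand-in for proto::OpProto_Attr, reduced to the two flags this patch adds.
struct AttrProto {
  bool extra = false;
  bool quant = false;
};

// Mirrors the chainable shape of TypedAttrChecker after the patch: the
// checker keeps a pointer to the attribute's proto entry and flips flags.
class AttrCheckerSketch {
 public:
  AttrCheckerSketch(const std::string& attr_name, AttrProto* attr)
      : attr_name_(attr_name), attr_(attr) {}

  AttrCheckerSketch& AsExtra() {
    attr_->extra = true;  // attr_->set_extra(true) in the real checker
    return *this;
  }

  AttrCheckerSketch& AsQuant() {
    attr_->quant = true;  // attr_->set_quant(true) in the real checker
    return *this;
  }

 private:
  std::string attr_name_;
  AttrProto* attr_;
};

int main() {
  AttrProto attr;
  AttrCheckerSketch("use_mkldnn", &attr).AsExtra();  // chains like SetDefault()
  std::cout << "extra=" << attr.extra << " quant=" << attr.quant << "\n";
}
```

That pointer is what `OpAttrChecker::AddAttrChecker` now threads through, which is why the proto gains matching fields next.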
diff --git a/paddle/fluid/framework/framework.proto b/paddle/fluid/framework/framework.proto
index 73103eb28274c..eb72d9e1420dc 100644
--- a/paddle/fluid/framework/framework.proto
+++ b/paddle/fluid/framework/framework.proto
@@ -90,6 +90,8 @@ message OpProto {
     optional bool duplicable = 3 [ default = false ];
     optional bool intermediate = 4 [ default = false ];
     optional bool dispensable = 5 [ default = false ];
+    optional bool extra = 6 [ default = false ];
+    optional bool quant = 7 [ default = false ];
   }

   // AttrProto describes the C++ type Attribute.
@@ -101,6 +103,8 @@ message OpProto {
     // language binding has responsibility to fill that
     // attribute. End-User should not set that attribute.
     optional bool generated = 4 [ default = false ];
+    optional bool extra = 5 [ default = false ];
+    optional bool quant = 6 [ default = false ];
   }

   required string type = 1;
diff --git a/paddle/fluid/framework/ir/op_compat_sensible_pass_tester.cc b/paddle/fluid/framework/ir/op_compat_sensible_pass_tester.cc
index 9602cd41131ce..756d3c2c77096 100644
--- a/paddle/fluid/framework/ir/op_compat_sensible_pass_tester.cc
+++ b/paddle/fluid/framework/ir/op_compat_sensible_pass_tester.cc
@@ -102,7 +102,7 @@ TEST(OpCompatSensiblePass, compatOpAttribute) {
   EXPECT_FALSE(compat.Judge(fc_op, "test_pass"));

   OpCompat compat_1("fc_test");
-  info.checker_->AddAttrChecker<int>("in_num_col_dims").SetDefault(1);
+  info.checker_->AddAttrChecker<int>("in_num_col_dims", nullptr).SetDefault(1);
   EXPECT_TRUE(compat_1.Judge(fc_op, "test_pass"));
   delete info.checker_;
   delete info.proto_;
diff --git a/paddle/fluid/framework/op_proto_maker.cc b/paddle/fluid/framework/op_proto_maker.cc
index 8fbea51584d3c..fc2012b1176ef 100644
--- a/paddle/fluid/framework/op_proto_maker.cc
+++ b/paddle/fluid/framework/op_proto_maker.cc
@@ -80,19 +80,24 @@ void OpProtoAndCheckerMaker::operator()(proto::OpProto* proto,
                   static_cast<int>(OpRole::kOptimize) |
                       static_cast<int>(OpRole::kLRSched),
                   static_cast<int>(OpRole::kNotSpecified)})
-      .SetDefault(static_cast<int>(OpRole::kNotSpecified));
+      .SetDefault(static_cast<int>(OpRole::kNotSpecified))
+      .AsExtra();
   AddAttr<std::vector<std::string>>(OpRoleVarAttrName(),
                                     "Optimized for variable")
-      .SetDefault({});
+      .SetDefault({})
+      .AsExtra();
   AddAttr<std::string>(OpNamescopeAttrName(), "Operator name with namesope.")
-      .SetDefault("");
+      .SetDefault("")
+      .AsExtra();
   AddAttr<std::vector<std::string>>(OpCreationCallstackAttrName(),
                                     "Callstack for Op Creatation.")
-      .SetDefault({});
+      .SetDefault({})
+      .AsExtra();
   AddAttr<std::string>(OpDeviceAttrName(), "Device type of this operator.")
-      .SetDefault("");
+      .SetDefault("")
+      .AsExtra();
   Validate();
 }
diff --git a/paddle/fluid/framework/op_proto_maker.h b/paddle/fluid/framework/op_proto_maker.h
index 506c3eb1e0ad0..932c76e242581 100644
--- a/paddle/fluid/framework/op_proto_maker.h
+++ b/paddle/fluid/framework/op_proto_maker.h
@@ -75,6 +75,16 @@ class OpProtoAndCheckerMaker {
       var_->set_dispensable(true);
       return *this;
     }
+
+    VariableBuilder &AsExtra() {
+      var_->set_extra(true);
+      return *this;
+    }
+
+    VariableBuilder &AsQuant() {
+      var_->set_quant(true);
+      return *this;
+    }
   };

   VariableBuilder AddInput(const std::string &name, const std::string &comment);
@@ -91,7 +101,7 @@ class OpProtoAndCheckerMaker {
     attr->set_comment(comment);
     attr->set_generated(generated);
     attr->set_type(AttrTypeID<T>());
-    return op_checker_->AddAttrChecker<T>(name);
+    return op_checker_->AddAttrChecker<T>(name, attr);
   }

   void AddComment(const std::string &comment) { proto_->set_comment(comment); }
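With the proto fields, the checker plumbing, and the `VariableBuilder` methods in place, an operator maker can tag inputs and attributes alike. A hedged sketch of what a `Make()` body looks like after this change (`MyOpMaker` and its names are hypothetical, the builder calls are exactly the ones added above, and the fragment assumes Paddle's headers rather than being a standalone program):

```cpp
#include "paddle/fluid/framework/op_proto_maker.h"

// Hypothetical maker showing the new tags together; "MyOp" is not a real op.
class MyOpMaker : public paddle::framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X", "(Tensor) The input tensor.");
    AddInput("Bias", "(Tensor) Optional bias, oneDNN path only.")
        .AsDispensable()
        .AsExtra();  // variable tag: sets OpProto::Var::extra
    AddOutput("Out", "(Tensor) The output tensor.");
    AddAttr<bool>("use_mkldnn", "(bool, default false) oneDNN kernel switch.")
        .SetDefault(false)
        .AsExtra();  // attribute tag: sets OpProto::Attr::extra via the checker
    AddAttr<float>("Scale_in", "Input scale, used only by the INT8 path.")
        .SetDefault(1.0f)
        .AsQuant();  // attribute tag: sets OpProto::Attr::quant via the checker
    AddComment("Sketch only: demonstrates AsExtra()/AsQuant() chaining.");
  }
};
```

The conv makers below are the first real users of this pattern.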
diff --git a/paddle/fluid/operators/conv_op.cc b/paddle/fluid/operators/conv_op.cc
index 9defe3262ff4c..bef3826e728fe 100644
--- a/paddle/fluid/operators/conv_op.cc
+++ b/paddle/fluid/operators/conv_op.cc
@@ -246,7 +246,8 @@ void Conv2DOpMaker::Make() {
   AddAttr<bool>("is_test",
                 "(bool, default false) Set to true for inference only, false "
                 "for training. Some layers may run faster when this is true.")
-      .SetDefault(false);
+      .SetDefault(false)
+      .AsExtra();
   AddInput("Input",
            "(Tensor) The input tensor of convolution operator. "
            "The format of input tensor is NCHW or NHWC, where N is batch size, "
@@ -264,12 +265,14 @@
            "(Tensor) Bias to be added to each output of filter application."
            "The format of output tensor is X (one-dimensional) of size equal"
            "to the number of output channels. Only used with MKL-DNN.")
-      .AsDispensable();
+      .AsDispensable()
+      .AsExtra();
   AddInput("ResidualData",
            "(Tensor) Tensor with residual data "
            "to which convolution output will be added."
            "Used with fuse_residual_connection fusion.")
-      .AsDispensable();
+      .AsDispensable()
+      .AsExtra();
   AddOutput("Output",
             "(Tensor) The output tensor of convolution operator. "
             "It has same data fromat and data type as the Input.");
@@ -306,69 +309,87 @@
   AddAttr<bool>(
       "use_cudnn",
       "(bool, default false) Only used in cudnn kernel, need install cudnn")
-      .SetDefault(false);
+      .SetDefault(false)
+      .AsExtra();
   AddAttr<bool>("fuse_relu_before_depthwise_conv",
                 "(bool, default false) Only used in cuda depthwise kernel")
-      .SetDefault(false);
+      .SetDefault(false)
+      .AsExtra();
   AddAttr<bool>("use_mkldnn",
                 "(bool, default false) Only used in mkldnn kernel")
-      .SetDefault(false);
+      .SetDefault(false)
+      .AsExtra();
   AddAttr<bool>(
       "use_quantizer",
      "(bool, default false) "
       "This parameter is no longer used. Use 'mkldnn_data_type' instead.")
-      .SetDefault(false);
+      .SetDefault(false)
+      .AsExtra();
   AddAttr<std::string>(
       "mkldnn_data_type",
       "(string, default \"float32\"). Data type of mkldnn kernel")
       .SetDefault("float32")
-      .InEnum({"float32", "int8", "bfloat16"});
+      .InEnum({"float32", "int8", "bfloat16"})
+      .AsExtra();
   AddAttr<bool>("fuse_relu", "(bool, default false) Only used in mkldnn kernel")
-      .SetDefault(false);
+      .SetDefault(false)
+      .AsExtra();
   AddAttr<bool>("fuse_brelu",
                 "(bool, default false) Only used in mkldnn kernel")
-      .SetDefault(false);
+      .SetDefault(false)
+      .AsExtra();
   AddAttr<float>("fuse_brelu_threshold",
                  "(float, default false 6.0) Only used in mkldnn kernel")
-      .SetDefault(6.0f);
+      .SetDefault(6.0f)
+      .AsExtra();
   AddAttr<std::string>("fuse_activation",
                        "(string, default \"\") Only used in mkldnn kernel")
-      .SetDefault("");
+      .SetDefault("")
+      .AsExtra();
   AddAttr<float>("fuse_alpha",
                  "(float, default 0.0) Only used in mkldnn kernel")
-      .SetDefault(0.0f);
+      .SetDefault(0.0f)
+      .AsExtra();
   AddAttr<float>("fuse_beta", "(float, default 0.0) Only used in mkldnn kernel")
-      .SetDefault(0.0f);
+      .SetDefault(0.0f)
+      .AsExtra();
   AddAttr<bool>(
       "use_addto",
       "(bool, default false) If use addto strategy or not, only used in "
       "cudnn kernel")
-      .SetDefault(false);
+      .SetDefault(false)
+      .AsExtra();
   AddAttr<bool>("fuse_residual_connection",
                 "(bool, default false) Only used in mkldnn kernel. Used "
                 "whenever convolution output is as an input to residual "
                 "connection.")
-      .SetDefault(false);
+      .SetDefault(false)
+      .AsExtra();
   AddAttr<float>("Scale_in",
                  "Scale_in to be used for int8 input data."
                  "Only used with MKL-DNN INT8.")
-      .SetDefault(1.0f);
+      .SetDefault(1.0f)
+      .AsExtra();
   AddAttr<float>("Scale_out",
                  "Scale_out to be used for int8 output data."
                  "Only used with MKL-DNN INT8.")
-      .SetDefault(1.0f);
+      .SetDefault(1.0f)
+      .AsExtra();
   AddAttr<float>("Scale_in_eltwise",
                  "Scale_in_eltwise to be used for int8 eltwise input data."
                  "Only used with MKL-DNN INT8.")
-      .SetDefault(1.0f);
+      .SetDefault(1.0f)
+      .AsExtra();
   AddAttr<std::vector<float>>("Scale_weights",
                               "Scale_weights to be used for int8 weights data."
                               "Only used with MKL-DNN INT8.")
-      .SetDefault({1.0f});
+      .SetDefault({1.0f})
+      .AsExtra();
   AddAttr<bool>("force_fp32_output",
                 "(bool, default false) Force INT8 kernel output FP32, only "
                 "used in MKL-DNN INT8")
-      .SetDefault(false);
+      .SetDefault(false)
+      .AsExtra();
   AddAttr<std::string>(
       "data_format",
       "(string, default NCHW) Only used in "
@@ -384,12 +405,14 @@
            "allocated/freed each time the operator runs, larger "
            "workspace size can increase performance but also requires "
            "better hardware. This size should be chosen carefully.")
-      .SetDefault(platform::GetDefaultConvWorkspaceSizeLimitMB());
+      .SetDefault(platform::GetDefaultConvWorkspaceSizeLimitMB())
+      .AsExtra();
   AddAttr<bool>("exhaustive_search",
                 "(bool, default false) cuDNN has many algorithm to calculation "
                 "convolution, whether enable exhaustive search "
                 "for cuDNN convolution or not, default is False.")
-      .SetDefault(false);
+      .SetDefault(false)
+      .AsExtra();
   AddComment(R"DOC(
 Convolution Operator.
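conv2d's long tail of cuDNN/oneDNN tuning knobs is the motivating case: every switch above is now tagged `extra` in the serialized `OpProto`, separating it from the op's core definition. A sketch of how a consumer might read the flags back, assuming only the accessors protoc generates for the fields added to framework.proto:

```cpp
#include <iostream>

#include "paddle/fluid/framework/framework.pb.h"  // generated from framework.proto

// Walk an op's attribute protos and report which carry the new tags.
// Uses only the standard accessors protoc generates: attrs(), name(),
// extra(), quant(), type().
void DumpAttrKinds(const paddle::framework::proto::OpProto& op) {
  for (const auto& attr : op.attrs()) {
    const char* kind = attr.extra() ? "extra" : (attr.quant() ? "quant" : "core");
    std::cout << op.type() << "." << attr.name() << ": " << kind << "\n";
  }
}
```

For the conv2d proto built above, such a dump would list `strides`, `paddings`, and friends as core, and the fuse/scale/workspace knobs as extra.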
@@ -426,7 +449,8 @@ void Conv3DOpMaker::Make() {
   AddAttr<bool>("is_test",
                 "(bool, default false) Set to true for inference only, false "
                 "for training. Some layers may run faster when this is true.")
-      .SetDefault(false);
+      .SetDefault(false)
+      .AsExtra();
   AddInput(
       "Input",
       "(Tensor) The input tensor of convolution operator. "
@@ -447,7 +471,8 @@
            "(Tensor) Tensor with residual data "
            "to which convolution output will be added."
            "Used with fuse_residual_connection fusion.")
-      .AsDispensable();
+      .AsDispensable()
+      .AsExtra();
   AddOutput("Output",
             "(Tensor) The output tensor of convolution operator."
             "It has same data fromat and data type as the Input.");
@@ -485,35 +510,44 @@
   AddAttr<bool>(
       "use_cudnn",
       "(bool, default false) Only used in cudnn kernel, need install cudnn")
-      .SetDefault(false);
+      .SetDefault(false)
+      .AsExtra();
   AddAttr<bool>("use_mkldnn",
                 "(bool, default false) Only used in mkldnn kernel")
-      .SetDefault(false);
+      .SetDefault(false)
+      .AsExtra();
   AddAttr<std::string>(
       "mkldnn_data_type",
       "(string, default \"float32\"). Data type of mkldnn kernel")
       .SetDefault("float32")
-      .InEnum({"float32", "int8", "bfloat16"});
+      .InEnum({"float32", "int8", "bfloat16"})
+      .AsExtra();
   AddAttr<bool>("fuse_relu", "(bool, default false) Only used in mkldnn kernel")
-      .SetDefault(false);
+      .SetDefault(false)
+      .AsExtra();
   AddAttr<std::string>("fuse_activation",
                        "(string, default \"\") Only used in mkldnn kernel")
-      .SetDefault("");
+      .SetDefault("")
+      .AsExtra();
   AddAttr<float>("fuse_alpha",
                  "(float, default 0.0) Only used in mkldnn kernel")
-      .SetDefault(0.0f);
+      .SetDefault(0.0f)
+      .AsExtra();
   AddAttr<float>("fuse_beta", "(float, default 0.0) Only used in mkldnn kernel")
-      .SetDefault(0.0f);
+      .SetDefault(0.0f)
+      .AsExtra();
   AddAttr<bool>(
       "use_addto",
       "(bool, default false) If use addto strategy or not, only used in "
       "cudnn kernel")
-      .SetDefault(false);
+      .SetDefault(false)
+      .AsExtra();
   AddAttr<bool>("fuse_residual_connection",
                 "(bool, default false) Only used in mkldnn kernel. Used "
                 "whenever convolution output is as an input to residual "
                 "connection.")
-      .SetDefault(false);
+      .SetDefault(false)
+      .AsExtra();
   AddAttr<std::string>(
       "data_format",
       "(string, default NCDHW) Only used in "
@@ -523,7 +557,8 @@
       .SetDefault("NCDHW");
   AddAttr<bool>("force_fp32_output",
                 "(bool, default false) Only used in mkldnn INT8 kernel")
-      .SetDefault(false);
+      .SetDefault(false)
+      .AsExtra();
   // TODO(dzhwinter): need to registered layout transform function
   AddAttr<int>("workspace_size_MB",
                "Only used in cudnn kernel. workspace size for cudnn, in MB, "
@@ -531,12 +566,14 @@
            "allocated/freed each time the operator runs, larger "
            "workspace size can increase performance but also requires "
            "better hardware. This size should be chosen carefully.")
-      .SetDefault(platform::GetDefaultConvWorkspaceSizeLimitMB());
+      .SetDefault(platform::GetDefaultConvWorkspaceSizeLimitMB())
+      .AsExtra();
   AddAttr<bool>("exhaustive_search",
                 "(bool, default false) cuDNN has many algorithm to calculation "
                 "convolution, whether enable exhaustive search "
                 "for cuDNN convolution or not, default is False.")
-      .SetDefault(false);
+      .SetDefault(false)
+      .AsExtra();
   AddComment(R"DOC(
 Convolution3D Operator.
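To sanity-check the result end to end, the registered proto can be queried through the op registry, mirroring how op_compat_sensible_pass_tester.cc reaches `info.proto_` and `info.checker_`. A sketch under that assumption (`IsExtraAttr` is a hypothetical helper, not a committed API):

```cpp
#include <string>

#include "paddle/fluid/framework/op_info.h"

// Sketch: report whether `attr_name` of a registered op is tagged extra.
// Assumes the op was registered through OpProtoAndCheckerMaker, so its
// OpInfo carries a filled-in proto_, as in the pass tester above.
bool IsExtraAttr(const std::string& op_type, const std::string& attr_name) {
  const auto& info = paddle::framework::OpInfoMap::Instance().Get(op_type);
  for (const auto& attr : info.proto_->attrs()) {
    if (attr.name() == attr_name) return attr.extra();
  }
  return false;  // unknown attributes default to core
}
```

Given the conv changes above, `IsExtraAttr("conv2d", "use_mkldnn")` should report true, while `IsExtraAttr("conv2d", "strides")` should not.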