Support older versions 6, 11, 12 for Clip Op (llvm#1100)
Signed-off-by: Tung D. Le <tung@jp.ibm.com>

Co-authored-by: Alexandre Eichenberger <alexe@us.ibm.com>
tungld and AlexandreEichenberger authored Jan 17, 2022
1 parent 76fce87 commit bd1bc6c
Showing 7 changed files with 154 additions and 8 deletions.
8 changes: 7 additions & 1 deletion src/Builder/OpBuildTable.inc
@@ -27,7 +27,7 @@ op_dialect_version_map_["CastMap"] = {1};
op_dialect_version_map_["CategoryMapper"] = {1};
op_dialect_version_map_["Ceil"] = {13};
op_dialect_version_map_["Celu"] = {12};
op_dialect_version_map_["Clip"] = {13};
op_dialect_version_map_["Clip"] = {13, 12, 11, 6};
op_dialect_version_map_["Compress"] = {11};
op_dialect_version_map_["Concat"] = {13};
op_dialect_version_map_["ConcatFromSequence"] = {11};
@@ -224,6 +224,12 @@ import_handler_map_["Celu"] =
&onnx_mlir::detail::FrontendGenImpl::buildOperation<mlir::ONNXCeluOp>;
import_handler_map_["Clip"] =
&onnx_mlir::detail::FrontendGenImpl::buildOperation<mlir::ONNXClipOp>;
import_handler_map_["ClipV12"] =
&onnx_mlir::detail::FrontendGenImpl::buildOperation<mlir::ONNXClipV12Op>;
import_handler_map_["ClipV11"] =
&onnx_mlir::detail::FrontendGenImpl::buildOperation<mlir::ONNXClipV11Op>;
import_handler_map_["ClipV6"] =
&onnx_mlir::detail::FrontendGenImpl::buildOperation<mlir::ONNXClipV6Op>;
import_handler_map_["Compress"] =
&onnx_mlir::detail::FrontendGenImpl::buildOperation<mlir::ONNXCompressOp>;
import_handler_map_["Concat"] =
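The table above registers one import handler per supported opset version: the newest definition keeps the plain name ("Clip"), while the older definitions get suffixed names ("ClipV12", "ClipV11", "ClipV6"). As a rough sketch of how a newest-first version list like {13, 12, 11, 6} can drive that selection when importing a model, consider the following; the helper name and map layout are illustrative assumptions, not the importer's actual code.

#include <map>
#include <string>
#include <vector>

// Hypothetical helper (not the real importer): pick the handler key for an op
// given the model's opset version and a newest-first version list such as
// "Clip" -> {13, 12, 11, 6}.
std::string selectVersionedOpName(const std::string &opName, int modelOpset,
    const std::map<std::string, std::vector<int>> &versionMap) {
  const std::vector<int> &versions = versionMap.at(opName);
  for (int version : versions) {
    if (modelOpset >= version)
      // The newest supported version keeps the plain name ("Clip");
      // older ones use a suffixed name ("ClipV12", "ClipV11", "ClipV6").
      return version == versions.front()
                 ? opName
                 : opName + "V" + std::to_string(version);
  }
  // Model opset is older than anything registered; use the oldest definition.
  return opName + "V" + std::to_string(versions.back());
}

For example, with "Clip" mapped to {13, 12, 11, 6}, a model at opset 9 resolves to "ClipV6", opset 11 to "ClipV11", and opset 13 or newer to "Clip".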
3 changes: 3 additions & 0 deletions src/Dialect/ONNX/ONNXOps.cpp
@@ -4688,6 +4688,9 @@ NOT_IMPLEMENTED_INFERSHAPE(ONNXPadV2Op);
NOT_IMPLEMENTED_INFERSHAPE(ONNXPadV11Op);
NOT_IMPLEMENTED_INFERSHAPE(ONNXResizeV11Op);
NOT_IMPLEMENTED_INFERSHAPE(ONNXResizeV10Op);
NOT_IMPLEMENTED_INFERSHAPE(ONNXClipV6Op);
NOT_IMPLEMENTED_INFERSHAPE(ONNXClipV11Op);
NOT_IMPLEMENTED_INFERSHAPE(ONNXClipV12Op);

//===----------------------------------------------------------------------===//
// Loop
75 changes: 75 additions & 0 deletions src/Dialect/ONNX/ONNXOps.td.inc
@@ -586,6 +586,81 @@ def ONNXClipOp:ONNX_Op<"Clip",
}];
}

def ONNXClipV12Op:ONNX_Op<"ClipV12",
[NoSideEffect, DeclareOpInterfaceMethods<ShapeInferenceOpInterface>]> {
let summary = "ONNX Clip operation";
let description = [{
"Clip operator limits the given input within an interval. The interval is"
"specified by the inputs 'min' and 'max'. They default to"
"numeric_limits::lowest() and numeric_limits::max(), respectively."
}];
let arguments = (ins AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$input,
AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef, NoneType]>:$min,
AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef, NoneType]>:$max);
let results = (outs AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$output);
let extraClassDeclaration = [{
static int getNumberOfOperands() {
return 3;
}
static int getNumberOfResults() {
return 1;
}
static std::vector<int> getTypeMap() {
return {20};
}
}];
}

def ONNXClipV11Op:ONNX_Op<"ClipV11",
[NoSideEffect, DeclareOpInterfaceMethods<ShapeInferenceOpInterface>]> {
let summary = "ONNX Clip operation";
let description = [{
"Clip operator limits the given input within an interval. The interval is"
"specified by the inputs 'min' and 'max'. They default to"
"numeric_limits::lowest() and numeric_limits::max(), respectively."
}];
let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$input,
AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef, NoneType]>:$min,
AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef, NoneType]>:$max);
let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$output);
let extraClassDeclaration = [{
static int getNumberOfOperands() {
return 3;
}
static int getNumberOfResults() {
return 1;
}
static std::vector<int> getTypeMap() {
return {20};
}
}];
}

def ONNXClipV6Op:ONNX_Op<"ClipV6",
[NoSideEffect, DeclareOpInterfaceMethods<ShapeInferenceOpInterface>]> {
let summary = "ONNX Clip operation";
let description = [{
"Clip operator limits the given input within an interval. The interval is"
"specified with arguments 'min' and 'max'. They default to"
"numeric_limits::lowest() and numeric_limits::max() respectively."
}];
let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$input,
DefaultValuedAttr<F32Attr, "(3.402823e+38)">:$max,
DefaultValuedAttr<F32Attr, "(-3.402823e+38)">:$min);
let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$output);
let extraClassDeclaration = [{
static int getNumberOfOperands() {
return 1;
}
static int getNumberOfResults() {
return 1;
}
static std::vector<int> getTypeMap() {
return {20};
}
}];
}

def ONNXCompressOp:ONNX_Op<"Compress",
[NoSideEffect, DeclareOpInterfaceMethods<ShapeInferenceOpInterface>]> {
let summary = "ONNX Compress operation";
36 changes: 30 additions & 6 deletions src/Transform/ONNX/Decompose.cpp
@@ -60,6 +60,27 @@ DenseElementsAttr createDenseArrayAttr(
llvm_unreachable("unexpected attribute type");
}

/// Create a scalar DenseElementsAttr from a FloatAttr or IntegerAttr.
/// This is used to create an ONNXConstant of rank 0, e.g. tensor<f32>.
DenseElementsAttr createScalarDenseAttr(
PatternRewriter &rewriter, Attribute attr) {
if (attr.dyn_cast<FloatAttr>()) {
mlir::Type elementType = rewriter.getF32Type();
SmallVector<float, 1> wrapper;
wrapper.emplace_back(attr.cast<FloatAttr>().getValueAsDouble());
return DenseElementsAttr::get(
RankedTensorType::get({}, elementType), llvm::makeArrayRef(wrapper));
}
if (attr.dyn_cast<IntegerAttr>()) {
mlir::Type elementType = rewriter.getIntegerType(64);
SmallVector<int64_t, 1> wrapper;
wrapper.emplace_back(attr.cast<IntegerAttr>().getInt());
return DenseElementsAttr::get(
RankedTensorType::get({}, elementType), llvm::makeArrayRef(wrapper));
}
llvm_unreachable("unexpected attribute type");
}

ConstantOp createUnitConstant(PatternRewriter &rewriter, Location loc) {
return rewriter.create<ConstantOp>(loc, rewriter.getUnitAttr());
}
@@ -124,21 +145,24 @@ void DecomposeONNXToONNXPass::runOnFunction() {

// These ops will be decomposed into other ONNX ops. Hence, they will not be
// available after this pass.
target.addIllegalOp<ONNXClipV6Op>();
target.addIllegalOp<ONNXClipV11Op>();
target.addIllegalOp<ONNXClipV12Op>();
target.addIllegalOp<ONNXLogSoftmaxOp>();
target.addIllegalOp<ONNXPadV2Op>();
target.addIllegalOp<ONNXPadV11Op>();
target.addIllegalOp<ONNXReduceL1Op>();
target.addIllegalOp<ONNXReduceL2Op>();
target.addIllegalOp<ONNXReduceLogSumOp>();
target.addIllegalOp<ONNXReduceLogSumExpOp>();
target.addIllegalOp<ONNXReduceSumSquareOp>();
target.addIllegalOp<ONNXResizeV11Op>();
target.addIllegalOp<ONNXResizeV10Op>();
target.addIllegalOp<ONNXScalerOp>();
target.addIllegalOp<ONNXLogSoftmaxOp>();
target.addIllegalOp<ONNXSequenceConstructOp>();
target.addIllegalOp<ONNXUpsampleOp>();
target.addIllegalOp<ONNXUpsampleV9Op>();
target.addIllegalOp<ONNXUpsampleV7Op>();
target.addIllegalOp<ONNXPadV2Op>();
target.addIllegalOp<ONNXPadV11Op>();
target.addIllegalOp<ONNXResizeV11Op>();
target.addIllegalOp<ONNXResizeV10Op>();
target.addIllegalOp<ONNXSequenceConstructOp>();

RewritePatternSet patterns(context);
populateWithGenerated(patterns);
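The new createScalarDenseAttr helper above turns a single FloatAttr or IntegerAttr into a rank-0 DenseElementsAttr (tensor<f32> or tensor<i64>), which the Clip decomposition patterns wrap in an onnx.Constant. A minimal usage sketch, assuming the helper is visible and a PatternRewriter is available from the surrounding pattern (the function and parameter names here are illustrative):

#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/PatternMatch.h"

// Illustrative only: materialize ClipV6's 'max' attribute as the rank-0
// tensor<f32> value that the newer Clip versions take as an operand.
static mlir::DenseElementsAttr clipMaxAsScalarTensor(
    mlir::PatternRewriter &rewriter, float maxValue) {
  mlir::FloatAttr maxAttr = rewriter.getF32FloatAttr(maxValue);
  // Yields dense<maxValue> : tensor<f32> (rank 0), not tensor<1xf32>.
  return createScalarDenseAttr(rewriter, maxAttr);
}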
24 changes: 24 additions & 0 deletions src/Transform/ONNX/Decompose.td
@@ -56,6 +56,11 @@ def GetNullStringAttr :
def CreateUnitConstant
: NativeCodeCall<"createUnitConstant($_builder, $_loc)">;

// Create a scalar DenseElementsAttr (rank 0) from a single attribute.
// E.g., the return type is tensor<f32> instead of tensor<0xf32> or tensor<1xf32>.
def createScalarDenseAttrRank0
: NativeCodeCall<"createScalarDenseAttr($_builder, $0)">;

// Create a DenseElementsAttr from a single attribute.
def createDenseArrayAttrFromSingleAttr
: NativeCodeCall<"createDenseArrayAttr($_builder, $_builder.getArrayAttr($0))">;
@@ -317,4 +322,23 @@ def SequenceConstructPattern1: Pat<
$x1)
>;

// Express Clip V6 using Clip V11.
def ClipV6Pattern : Pat<
(ONNXClipV6Op $x, $maxAttr, $minAttr),
(ONNXClipV11Op $x, (ONNXConstantOpFromDenseAttr(createScalarDenseAttrRank0 $minAttr)),
(ONNXConstantOpFromDenseAttr(createScalarDenseAttrRank0 $maxAttr)))
>;

// Express Clip V11 using Clip V12.
def ClipV11Pattern : Pat<
(ONNXClipV11Op $x, $min, $max),
(ONNXClipV12Op $x, $min, $max)
>;

// Express Clip V12 using Clip V13 (the latest).
def ClipV12Pattern : Pat<
(ONNXClipV12Op $x, $min, $max),
(ONNXClipOp $x, $min, $max)
>;

#endif // ONNX_DECOMPOSE
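Taken together, the three patterns form a chain: ClipV6 is rewritten to ClipV11, ClipV11 to ClipV12, and ClipV12 to the opset-13 Clip. Because DecomposeONNXToONNXPass marks all three older ops illegal, the conversion framework keeps applying these rules until only onnx.Clip remains, so a single pass run lowers V6 all the way to the latest version (the test below checks exactly this). A condensed sketch of that wiring, mirroring the Decompose.cpp change above rather than adding anything new:

// Condensed sketch of the pass wiring shown above: marking the older Clip
// versions illegal forces the conversion to rewrite them step by step until
// only the opset-13 onnx.Clip is left.
ConversionTarget target(getContext());
// ... the pass's existing legal-dialect setup goes here ...
target.addIllegalOp<ONNXClipV6Op, ONNXClipV11Op, ONNXClipV12Op>();

RewritePatternSet patterns(&getContext());
populateWithGenerated(patterns); // includes the ClipV6/V11/V12 patterns above

// Applies ClipV6Pattern, then ClipV11Pattern on its result, then
// ClipV12Pattern, i.e. V6 -> V11 -> V12 -> Clip (opset 13).
if (failed(applyPartialConversion(getFunction(), target, std::move(patterns))))
  signalPassFailure();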
14 changes: 14 additions & 0 deletions test/mlir/onnx/onnx_decompose.mlir
@@ -309,3 +309,17 @@ func @test_seqence_construct_1(%arg0: tensor<*xf32>, %arg1: tensor<*xf32>) -> !o
// CHECK: return [[VAR_2_]] : !onnx.Seq<tensor<*xf32>>
}

// -----

func @test_clipv6(%arg0 : tensor<*xf32>) -> () {
%0 = "onnx.ClipV6"(%arg0) {max = 6.000000e+00 : f32, min = 0.000000e+00 : f32} : (tensor<*xf32>) -> tensor<*xf32>
return

// CHECK-LABEL: func @test_clipv6
// CHECK-SAME: ([[PARAM_0_:%.+]]: tensor<*xf32>) {
// CHECK-DAG: [[VAR_0_:%.+]] = "onnx.Constant"() {value = dense<0.000000e+00> : tensor<f32>} : () -> tensor<f32>
// CHECK-DAG: [[VAR_1_:%.+]] = "onnx.Constant"() {value = dense<6.000000e+00> : tensor<f32>} : () -> tensor<f32>
// CHECK: [[VAR_2_:%.+]] = "onnx.Clip"([[PARAM_0_]], [[VAR_0_]], [[VAR_1_]]) : (tensor<*xf32>, tensor<f32>, tensor<f32>) -> tensor<*xf32>
// CHECK: return
// CHECK: }
}
2 changes: 1 addition & 1 deletion utils/gen_onnx_mlir.py
@@ -73,7 +73,7 @@
'CategoryMapper': [1],
'Ceil': [13],
'Celu': [12],
'Clip': [13],
'Clip': [13, 12, 11, 6],
'Compress': [11],
'Concat': [13],
'ConcatFromSequence': [11],
