Integrate LLVM at llvm/llvm-project@4332b049edf6 #10180

Merged: 8 commits, Aug 24, 2022
4 changes: 4 additions & 0 deletions build_tools/bazel_to_cmake/bazel_to_cmake_targets.py
@@ -108,6 +108,10 @@
"tensorflow::external_mhlo_includes",
"MhloToStandard",
],
"@mlir-hlo//:map_chlo_to_hlo_op": [
"ChloDialect",
"MhloDialect",
],
"@mlir-hlo//:map_lmhlo_to_scalar_op": [
"tensorflow::external_mhlo_includes",
"LmhloDialect", # Unfortunate.

@@ -436,8 +436,6 @@ void LinalgSingleTilingExpertPass::runOnOperation() {
strategy.tileIf(doTiling, anchorOpName, tilingOptions)
.padIf(pad, anchorOpName, paddingOptions)
.decomposeIf(decomposeToLowerDimOp)
.generalizeIf(generalize, anchorOpName)
.interchangeIf(!iteratorInterchange.empty(), iteratorInterchange)
.peelIf(peel, generalize ? genericOpName : anchorOpName, peelingOptions)
.vectorizeIf(vectorize, generalize ? genericOpName : anchorOpName,
nullptr, vectorizePadding);

1 change: 1 addition & 0 deletions compiler/src/iree/compiler/InputConversion/MHLO/BUILD
@@ -106,6 +106,7 @@ iree_compiler_cc_library(
"@mlir-hlo//:legalize_shape_computations",
"@mlir-hlo//:legalize_to_linalg",
"@mlir-hlo//:legalize_to_standard",
"@mlir-hlo//:map_chlo_to_hlo_op",
"@mlir-hlo//:map_lmhlo_to_scalar_op",
"@mlir-hlo//:map_mhlo_to_scalar_op",
"@mlir-hlo//:materialize_broadcasts",

@@ -12,6 +12,7 @@
#include "iree/compiler/InputConversion/MHLO/Rewriters.h"
#include "mlir-hlo/Dialect/mhlo/IR/chlo_ops.h"
#include "mlir-hlo/Dialect/mhlo/IR/hlo_ops.h"
#include "mlir-hlo/Dialect/mhlo/transforms/map_chlo_to_hlo_op.h"
#include "mlir-hlo/Dialect/mhlo/transforms/rewriters.h"
#include "mlir-hlo/utils/broadcast_utils.h"
#include "mlir/Dialect/Arithmetic/IR/Arithmetic.h"
@@ -428,9 +429,15 @@ struct CompareBinaryBroadcastingAdaptor : public BinaryBroadcastingAdaptor {
BroadcastValues broadcastValues,
OpBuilder &builder) override {
chlo::BroadcastCompareOpAdaptor adaptor(operands, op->getAttrDictionary());
Optional<chlo::ComparisonType> chloCmpType = adaptor.compare_type();
mhlo::ComparisonTypeAttr mhloCmpType;
if (chloCmpType)
mhloCmpType = mhlo::ComparisonTypeAttr::get(
builder.getContext(), *chlo::mhloComparisonType(*chloCmpType));
return builder.create<mhlo::CompareOp>(
loc, resultType, broadcastValues.first, broadcastValues.second,
adaptor.comparison_direction(), adaptor.compare_typeAttr());
*chlo::mhloComparisonDirection(adaptor.comparison_direction()),
mhloCmpType);
}
};
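The hunk above tracks the upstream split of the comparison enums: CHLO now defines its own ComparisonDirection and ComparisonType, so the adaptor translates them into their MHLO counterparts (via the map_chlo_to_hlo_op.h helpers) before building the mhlo::CompareOp. A minimal standalone sketch of that translation step, assuming only the chlo::mhloComparisonType helper used above:

#include "mlir-hlo/Dialect/mhlo/IR/hlo_ops.h"
#include "mlir-hlo/Dialect/mhlo/transforms/map_chlo_to_hlo_op.h"

// Sketch: map an optional CHLO comparison type onto the attribute form that
// mhlo::CompareOp takes. A null attribute means "no comparison type set",
// matching the default-constructed mhloCmpType in the adaptor above.
static mlir::mhlo::ComparisonTypeAttr toMhloComparisonType(
    mlir::OpBuilder &builder,
    llvm::Optional<mlir::chlo::ComparisonType> chloType) {
  if (!chloType) return {};
  return mlir::mhlo::ComparisonTypeAttr::get(
      builder.getContext(), *mlir::chlo::mhloComparisonType(*chloType));
}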

@@ -19,6 +19,44 @@ namespace MHLO {

namespace {

inline llvm::Optional<chlo::ComparisonDirection> chloComparisonDirection(
mhlo::ComparisonDirection value) {
switch (value) {
case mhlo::ComparisonDirection::EQ:
return chlo::ComparisonDirection::EQ;
case mhlo::ComparisonDirection::NE:
return chlo::ComparisonDirection::NE;
case mhlo::ComparisonDirection::GE:
return chlo::ComparisonDirection::GE;
case mhlo::ComparisonDirection::GT:
return chlo::ComparisonDirection::GT;
case mhlo::ComparisonDirection::LE:
return chlo::ComparisonDirection::LE;
case mhlo::ComparisonDirection::LT:
return chlo::ComparisonDirection::LT;
default:
return {};
}
}

inline llvm::Optional<chlo::ComparisonType> chloComparisonType(
mhlo::ComparisonType value) {
switch (value) {
case mhlo::ComparisonType::NOTYPE:
return chlo::ComparisonType::NOTYPE;
case mhlo::ComparisonType::FLOAT:
return chlo::ComparisonType::FLOAT;
case mhlo::ComparisonType::TOTALORDER:
return chlo::ComparisonType::TOTALORDER;
case mhlo::ComparisonType::SIGNED:
return chlo::ComparisonType::SIGNED;
case mhlo::ComparisonType::UNSIGNED:
return chlo::ComparisonType::UNSIGNED;
default:
return {};
}
}

bool isComplexTensor(Value v) {
if (auto tt = v.getType().dyn_cast<TensorType>()) {
return tt.getElementType().isa<ComplexType>();
@@ -224,10 +262,54 @@ struct ConvertExpOp : public OpConversionPattern<mhlo::ExpOp> {
};

template <typename CompareOpTy, typename ComparatorOpTy>
struct ConvertCompareOp : public OpConversionPattern<CompareOpTy> {
struct ConvertCHLOCompareOp : public OpConversionPattern<CompareOpTy> {
using OpConversionPattern<CompareOpTy>::OpConversionPattern;
ConvertCHLOCompareOp(TypeConverter &typeConverter, MLIRContext *context,
chlo::ComparisonDirection direction)
: OpConversionPattern<CompareOpTy>(typeConverter, context),
direction(direction) {}

LogicalResult matchAndRewrite(
CompareOpTy op, typename CompareOpTy::Adaptor adaptor,
ConversionPatternRewriter &rewriter) const override {
Location loc = op.getLoc();

if (!isComplexTensor(adaptor.lhs()) || !isComplexTensor(adaptor.rhs())) {
return rewriter.notifyMatchFailure(op, "not complex tensor");
}
if (direction != op.comparison_direction()) {
return rewriter.notifyMatchFailure(op, "not matching direction");
}

auto lhs = adaptor.lhs();
auto rhs = adaptor.rhs();
auto lhsReal = rewriter.createOrFold<mhlo::RealOp>(loc, lhs);
auto lhsImag = rewriter.createOrFold<mhlo::ImagOp>(loc, lhs);
auto rhsReal = rewriter.createOrFold<mhlo::RealOp>(loc, rhs);
auto rhsImag = rewriter.createOrFold<mhlo::ImagOp>(loc, rhs);

rewriter.replaceOpWithNewOp<ComparatorOpTy>(
op,
rewriter.create<chlo::BroadcastCompareOp>(
loc, lhsReal, rhsReal,
/*broadcast_dimensions=*/nullptr,
adaptor.comparison_directionAttr(), adaptor.compare_typeAttr()),
rewriter.create<chlo::BroadcastCompareOp>(
loc, lhsImag, rhsImag,
/*broadcast_dimensions=*/nullptr,
adaptor.comparison_directionAttr(), adaptor.compare_typeAttr()));

return success();
}

chlo::ComparisonDirection direction;
};

template <typename CompareOpTy, typename ComparatorOpTy>
struct ConvertMHLOCompareOp : public OpConversionPattern<CompareOpTy> {
using OpConversionPattern<CompareOpTy>::OpConversionPattern;
ConvertCompareOp(TypeConverter &typeConverter, MLIRContext *context,
mhlo::ComparisonDirection direction)
ConvertMHLOCompareOp(TypeConverter &typeConverter, MLIRContext *context,
mhlo::ComparisonDirection direction)
: OpConversionPattern<CompareOpTy>(typeConverter, context),
direction(direction) {}

@@ -250,16 +332,25 @@ struct ConvertCompareOp : public OpConversionPattern<CompareOpTy> {
auto rhsReal = rewriter.createOrFold<mhlo::RealOp>(loc, rhs);
auto rhsImag = rewriter.createOrFold<mhlo::ImagOp>(loc, rhs);

// If the input op is an mhlo op, we need to convert the attributes to the
// corresponding chlo ones.
chlo::ComparisonDirection chloCmpDirection =
*chloComparisonDirection(adaptor.comparison_direction());

Optional<mhlo::ComparisonType> mhloCmpType = adaptor.compare_type();
chlo::ComparisonTypeAttr chloCmpType;
if (mhloCmpType)
chloCmpType = chlo::ComparisonTypeAttr::get(
rewriter.getContext(), *chloComparisonType(*mhloCmpType));

rewriter.replaceOpWithNewOp<ComparatorOpTy>(
op,
rewriter.create<chlo::BroadcastCompareOp>(
loc, lhsReal, rhsReal,
/*broadcast_dimensions=*/nullptr, op.comparison_directionAttr(),
op.compare_typeAttr()),
/*broadcast_dimensions=*/nullptr, chloCmpDirection, chloCmpType),
rewriter.create<chlo::BroadcastCompareOp>(
loc, lhsImag, rhsImag,
/*broadcast_dimensions=*/nullptr, op.comparison_directionAttr(),
op.compare_typeAttr()));
/*broadcast_dimensions=*/nullptr, chloCmpDirection, chloCmpType));

return success();
}
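For intuition: the rewrite above encodes the usual component-wise semantics of complex comparison, with EQ results joined by "and" and NE results joined by "or". A plain-C++ analogy (illustrative only, not part of the change):

#include <complex>

// Complex EQ/NE reduce to component-wise comparisons on the real and
// imaginary parts, joined by AND/OR -- the role played by ComparatorOpTy
// (mhlo::AndOp / mhlo::OrOp) in the patterns above.
static bool complexEq(std::complex<float> a, std::complex<float> b) {
  return a.real() == b.real() && a.imag() == b.imag();
}
static bool complexNe(std::complex<float> a, std::complex<float> b) {
  return a.real() != b.real() || a.imag() != b.imag();
}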
@@ -333,14 +424,14 @@ void populateMHLOComplexToRealPatterns(MLIRContext *context,
patterns.insert<ConvertExpOp>(typeConverter, context);

// Compare ops.
patterns.insert<ConvertCompareOp<mhlo::CompareOp, mhlo::OrOp>>(
typeConverter, context, mhlo::ComparisonDirection::NE);
patterns.insert<ConvertCompareOp<mhlo::CompareOp, mhlo::AndOp>>(
typeConverter, context, mhlo::ComparisonDirection::EQ);
patterns.insert<ConvertCompareOp<chlo::BroadcastCompareOp, mhlo::OrOp>>(
patterns.insert<ConvertMHLOCompareOp<mhlo::CompareOp, mhlo::OrOp>>(
typeConverter, context, mhlo::ComparisonDirection::NE);
patterns.insert<ConvertCompareOp<chlo::BroadcastCompareOp, mhlo::AndOp>>(
patterns.insert<ConvertMHLOCompareOp<mhlo::CompareOp, mhlo::AndOp>>(
typeConverter, context, mhlo::ComparisonDirection::EQ);
patterns.insert<ConvertCHLOCompareOp<chlo::BroadcastCompareOp, mhlo::OrOp>>(
typeConverter, context, chlo::ComparisonDirection::NE);
patterns.insert<ConvertCHLOCompareOp<chlo::BroadcastCompareOp, mhlo::AndOp>>(
typeConverter, context, chlo::ComparisonDirection::EQ);
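The registrations above replace the single ConvertCompareOp with MHLO- and CHLO-specific variants, each keyed on its own dialect's ComparisonDirection enum. For context, a hypothetical driver for these patterns could look like the sketch below; the pass wrapper, identity type conversion, and greedy application are illustrative assumptions, not code from this PR:

#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Transforms/DialectConversion.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"

// Hypothetical pass (illustrative only) that applies the populated
// complex-to-real patterns greedily over a function body.
struct ExampleComplexToRealPass
    : mlir::PassWrapper<ExampleComplexToRealPass,
                        mlir::OperationPass<mlir::func::FuncOp>> {
  void runOnOperation() override {
    mlir::MLIRContext *context = &getContext();
    mlir::TypeConverter typeConverter;
    typeConverter.addConversion([](mlir::Type t) { return t; });  // identity
    mlir::RewritePatternSet patterns(context);
    mlir::iree_compiler::MHLO::populateMHLOComplexToRealPatterns(
        context, typeConverter, patterns);
    if (mlir::failed(mlir::applyPatternsAndFoldGreedily(
            getOperation(), std::move(patterns))))
      signalPassFailure();
  }
};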

// Complex/Real/Imag conversions should fold away.
// Note that this is an opinion taken because these patterns are targeted

@@ -79,7 +79,7 @@ func.func @dynamicBroadcastCompare(%arg0: tensor<?xf32>, %arg1: tensor<?x?xf32>)
// NOTE: compare is unique because of the element type switch. The pattern
// will fail or the verifier will catch it if wrong.
// CHECK-NOT: mhlo.compare
%0 = chlo.broadcast_compare %arg0, %arg1 {comparison_direction = #mhlo<comparison_direction EQ>} : (tensor<?xf32>, tensor<?x?xf32>) -> tensor<?x?xi1>
%0 = chlo.broadcast_compare %arg0, %arg1 {comparison_direction = #chlo<comparison_direction EQ>} : (tensor<?xf32>, tensor<?x?xf32>) -> tensor<?x?xi1>
return %0 : tensor<?x?xi1>
}

@@ -296,7 +296,7 @@ func.func @atan2WithoutBroadcast(%arg0: tensor<4xf32>, %arg1: tensor<4xf32>) ->
// CHECK-LABEL: @compareWithoutBroadcast
func.func @compareWithoutBroadcast(%arg0: tensor<4xf32>, %arg1: tensor<4xf32>) -> tensor<4xi1> {
// CHECK-NOT: mhlo.compare
%0 = chlo.broadcast_compare %arg0, %arg1 {comparison_direction = #mhlo<comparison_direction EQ>} : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xi1>
%0 = chlo.broadcast_compare %arg0, %arg1 {comparison_direction = #chlo<comparison_direction EQ>} : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xi1>
return %0 : tensor<4xi1>
}

@@ -123,8 +123,8 @@ func.func @compare_eq(%arg0 : tensor<2xf32>, %arg1 : tensor<2xf32>,
%arg2 : tensor<2xf32>, %arg3 : tensor<2xf32>) -> (tensor<2xi1>) {
%lhs = "mhlo.complex"(%arg0, %arg1) : (tensor<2xf32>, tensor<2xf32>) -> (tensor<2xcomplex<f32>>)
%rhs = "mhlo.complex"(%arg2, %arg3) : (tensor<2xf32>, tensor<2xf32>) -> (tensor<2xcomplex<f32>>)
// CHECK-DAG: %[[OUTR:.+]] = chlo.broadcast_compare %arg0, %arg2 {comparison_direction = #mhlo<comparison_direction EQ>}
// CHECK-DAG: %[[OUTI:.+]] = chlo.broadcast_compare %arg1, %arg3 {comparison_direction = #mhlo<comparison_direction EQ>}
// CHECK-DAG: %[[OUTR:.+]] = chlo.broadcast_compare %arg0, %arg2 {comparison_direction = #chlo<comparison_direction EQ>}
// CHECK-DAG: %[[OUTI:.+]] = chlo.broadcast_compare %arg1, %arg3 {comparison_direction = #chlo<comparison_direction EQ>}
// CHECK-DAG: %[[OUT:.+]] = mhlo.and %[[OUTR]], %[[OUTI]]
%0 = "mhlo.compare"(%lhs, %rhs) {comparison_direction = #mhlo<comparison_direction EQ>} : (tensor<2xcomplex<f32>>, tensor<2xcomplex<f32>>) -> tensor<2xi1>

@@ -137,8 +137,8 @@ func.func @compare_ne(%arg0 : tensor<2xf32>, %arg1 : tensor<2xf32>,
%arg2 : tensor<2xf32>, %arg3 : tensor<2xf32>) -> (tensor<2xi1>) {
%lhs = "mhlo.complex"(%arg0, %arg1) : (tensor<2xf32>, tensor<2xf32>) -> (tensor<2xcomplex<f32>>)
%rhs = "mhlo.complex"(%arg2, %arg3) : (tensor<2xf32>, tensor<2xf32>) -> (tensor<2xcomplex<f32>>)
// CHECK-DAG: %[[OUTR:.+]] = chlo.broadcast_compare %arg0, %arg2 {comparison_direction = #mhlo<comparison_direction NE>}
// CHECK-DAG: %[[OUTI:.+]] = chlo.broadcast_compare %arg1, %arg3 {comparison_direction = #mhlo<comparison_direction NE>}
// CHECK-DAG: %[[OUTR:.+]] = chlo.broadcast_compare %arg0, %arg2 {comparison_direction = #chlo<comparison_direction NE>}
// CHECK-DAG: %[[OUTI:.+]] = chlo.broadcast_compare %arg1, %arg3 {comparison_direction = #chlo<comparison_direction NE>}
// CHECK-DAG: %[[OUT:.+]] = mhlo.or %[[OUTR]], %[[OUTI]]
%0 = "mhlo.compare"(%lhs, %rhs) {comparison_direction = #mhlo<comparison_direction NE>} : (tensor<2xcomplex<f32>>, tensor<2xcomplex<f32>>) -> tensor<2xi1>

2 changes: 1 addition & 1 deletion integrations/tensorflow/WORKSPACE
@@ -7,7 +7,7 @@

load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository")

TENSORFLOW_COMMIT = "8a7764be0d32a72ad6d93ff3216520af184e26a0"
TENSORFLOW_COMMIT = "55791c2c29704da7f52011eeaeb4ec70835fbb7d"

git_repository(
name = "org_tensorflow",

@@ -9,7 +9,7 @@ transform.structured.canonicalized_sequence {
%1, %loops1:3 = transform.structured.tile %0 [4, 4, 4]
// CHECK: %[[TILED2:.*]], %{{.*}}:3 = transform.structured.tile %[[TILED]]
%2, %loops2:3 = transform.structured.tile %1 [2, 2, 2]
// CHECK: %[[PADDED:.*]] = transform.structured.pad %[[TILED2]] {pack_paddings = [1, 1, 0]}
// CHECK: %[[PADDED:.*]] = transform.structured.pad %[[TILED2]] {hoist_paddings = [], pack_paddings = [1, 1, 0], padding_dimensions = [], padding_values = [], transpose_paddings = []}
%3 = transform.structured.pad %2 {pack_paddings = [1, 1, 0]}
// CHECK: %{{.*}} = transform.structured.vectorize %[[PADDED]] {vectorize_padding = true}
%4 = transform.structured.vectorize %3 {vectorize_padding = true}
@@ -19,7 +19,7 @@
transform.structured.vectorize %5
// CHECK: bufferize
bufferize
// CHECK: lower_vectors {multireduction_lowering = "innerreduce"}
// CHECK: lower_vectors {contraction_lowering = "outerproduct", multireduction_lowering = "innerreduce", split_transfers = "linalg-copy", stages = [0, 1, 2, 3, 4, 5, 6], transpose_avx2_lowering = false, transpose_lowering = "eltwise", unroll_vector_transfers = true}
lower_vectors { multireduction_lowering = "innerreduce"}
// CHECK: lower_to_llvm
lower_to_llvm
2 changes: 1 addition & 1 deletion third_party/llvm-project
2 changes: 1 addition & 1 deletion third_party/mlir-hlo