Integrate llvm-project and bump dependencies. (#10140)
* llvm-project: 619fd8c2ab505d8f79cbbbe3fd09b02f6640e1b1
* mlir-hlo: cb55a7168c1841d05287677746a39a5de7cb855f
* tensorflow: fc4021a8dd654606cd95e61a033691157853e122

Additional changes:
* Rename member functions for tensor ops (see the accessor sketch below)
* Remove the reluN TOSA tests
* Carry patches for llvm-project and mlir-hlo
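
The bulk of this integrate is mechanical: upstream MLIR switched its generated op accessors to prefixed names, so calls such as sliceOp.source() become sliceOp.getSource() throughout the diffs below. A minimal sketch of the adaptation, assuming the MLIR tensor dialect headers are on the include path; the helper getSliceSource is illustrative only and not part of this commit:

#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/IR/Value.h"

// Illustrative helper (not in this commit): returns the source tensor of an
// extract_slice op.
static mlir::Value getSliceSource(mlir::tensor::ExtractSliceOp sliceOp) {
  // Pre-integrate spelling of the generated accessor:
  //   return sliceOp.source();
  // Post-integrate prefixed spelling used throughout this commit:
  return sliceOp.getSource();
}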
ThomasRaoux authored Aug 19, 2022
1 parent cb0f8d4 commit 979d6ea
Showing 20 changed files with 18 additions and 40 deletions.
@@ -445,7 +445,7 @@ struct SwapExtractSliceWithTiledProducer

   LogicalResult matchAndRewrite(tensor::ExtractSliceOp sliceOp,
                                 PatternRewriter &rewriter) const override {
-    OpResult producer = sliceOp.source().dyn_cast<OpResult>();
+    OpResult producer = sliceOp.getSource().dyn_cast<OpResult>();
     if (!producer) {
       return rewriter.notifyMatchFailure(sliceOp, "source uses bb arg");
     }
@@ -473,7 +473,7 @@ struct SwapExtractSliceWithDispatchTensorLoad
   LogicalResult matchAndRewrite(tensor::ExtractSliceOp sliceOp,
                                 PatternRewriter &rewriter) const override {
     auto loadOp =
-        sliceOp.source().getDefiningOp<IREE::Flow::DispatchTensorLoadOp>();
+        sliceOp.getSource().getDefiningOp<IREE::Flow::DispatchTensorLoadOp>();
     if (!loadOp) return failure();

     SmallVector<OpFoldResult> combinedOffsets, combinedSizes, combinedStrides;

@@ -461,7 +461,7 @@ struct ConvertMHLOToLinalgOnTensorsPass
     RewritePatternSet patterns(&getContext());
     MLIRContext *context = &getContext();

-    auto typeConverter = mhlo::createHloToLinalgSignedIntegerConverter();
+    auto typeConverter = mhlo::createHloToLinalgTypeConverter();
     typeConverter->addArgumentMaterialization(scalarToTensor);
     // NOTE: not using corresponding setupMHLOToFlowPatterns because the entire
     // MHLO dialects are marked illegal by this pass.

2 changes: 1 addition & 1 deletion integrations/tensorflow/WORKSPACE
@@ -7,7 +7,7 @@

 load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository")

-TENSORFLOW_COMMIT = "1986967601040a368e44b79339567d49f72a368f"
+TENSORFLOW_COMMIT = "fc4021a8dd654606cd95e61a033691157853e122"

 git_repository(
     name = "org_tensorflow",

@@ -1486,7 +1486,7 @@ struct FoldTensorCastOp : public OpInterfaceRewritePattern<LinalgExtOp> {
     for (OpOperand *opOperand : op.getInputOperands()) {
       auto tensorCastOp = opOperand->get().getDefiningOp<tensor::CastOp>();
       newOperands.push_back(canFoldIntoConsumerOp(tensorCastOp)
-                                ? tensorCastOp.source()
+                                ? tensorCastOp.getSource()
                                 : opOperand->get());
     }
     // Init tensors may fold, in which case the resultType must also change.

@@ -78,7 +78,7 @@ FailureOr<FusionResult> LinalgExtFusionPattern::returningMatchAndRewrite(
           .getDefiningOp<tensor::ExtractSliceOp>();
   if (!sliceOp)
     return failure();
-  auto producerOp = sliceOp.source().getDefiningOp<TilingInterface>();
+  auto producerOp = sliceOp.getSource().getDefiningOp<TilingInterface>();
   if (!producerOp || producerOp->getNumResults() != 1)
     return failure();

@@ -192,7 +192,7 @@ struct OpTilingPattern : public OpInterfaceRewritePattern<TilingInterface> {
 /// tensor.extract_slice` to `tensor.extract_slice -> `TilingInterface`.
 FailureOr<Operation *> SwapTilingInterfaceOp::returningMatchAndRewrite(
     tensor::ExtractSliceOp sliceOp, PatternRewriter &rewriter) const {
-  auto sourceOp = sliceOp.source().getDefiningOp<TilingInterface>();
+  auto sourceOp = sliceOp.getSource().getDefiningOp<TilingInterface>();
   if (!sourceOp)
     return failure();
   SmallVector<Operation *> tiledOps = sourceOp.getTiledImplementation(

@@ -81,7 +81,7 @@ Value mlir::iree_compiler::IREE::LinalgExt::createMatchingSubsetInsertOp(
     OpBuilder &b, Location loc, tensor::ExtractSliceOp subsetExtractOp,
     Value source, Value dest) {
   return b.create<tensor::InsertSliceOp>(
-      loc, subsetExtractOp.source().getType(), source, dest,
+      loc, subsetExtractOp.getSource().getType(), source, dest,
       subsetExtractOp.offsets(), subsetExtractOp.sizes(),
       subsetExtractOp.strides(), subsetExtractOp.static_offsets(),
       subsetExtractOp.static_sizes(), subsetExtractOp.static_strides());

@@ -335,7 +335,7 @@ static linalg::LinalgOp findSingleLinalgOpDefiningAll(ValueRange range) {
     // mechanism of tracking op replacement at creation, or even different
     // patterns that identify the "main" result of a transformation.
     while (auto castOp = value.getDefiningOp<tensor::CastOp>())
-      value = castOp.source();
+      value = castOp.getSource();

     if (auto currentSourceOp = value.getDefiningOp<linalg::LinalgOp>()) {
       if (!sourceOp || sourceOp == currentSourceOp) {

@@ -1487,7 +1487,7 @@ struct FoldTensorCastOp : public OpInterfaceRewritePattern<LinalgExtOp> {
     for (OpOperand *opOperand : op.getInputOperands()) {
       auto tensorCastOp = opOperand->get().getDefiningOp<tensor::CastOp>();
       newOperands.push_back(canFoldIntoConsumerOp(tensorCastOp)
-                                ? tensorCastOp.source()
+                                ? tensorCastOp.getSource()
                                 : opOperand->get());
     }
     // Init tensors may fold, in which case the resultType must also change.

@@ -362,8 +362,7 @@ void TilingInterfaceTilingPass::runOnOperation() {
           buildFlowWorkgroupInfoOp<IREE::Input::DispatchWorkgroupIDOp>(
               builder, dim),
           buildFlowWorkgroupInfoOp<IREE::Input::DispatchWorkgroupCountOp>(
-              builder, dim),
-          linalg::DistributionMethod::Cyclic};
+              builder, dim)};
         }
         return procInfo;
       }};

@@ -33,7 +33,7 @@ FailureOr<FusionResult> LinalgExtFusionPattern::returningMatchAndRewrite(
           .getDefiningOp<tensor::ExtractSliceOp>();
   if (!sliceOp)
     return failure();
-  auto producerOp = sliceOp.source().getDefiningOp<TilingInterface>();
+  auto producerOp = sliceOp.getSource().getDefiningOp<TilingInterface>();
   if (!producerOp || producerOp->getNumResults() != 1)
     return failure();

@@ -192,7 +192,7 @@ struct OpTilingPattern : public OpInterfaceRewritePattern<TilingInterface> {
 /// tensor.extract_slice` to `tensor.extract_slice -> `TilingInterface`.
 FailureOr<Operation *> SwapTilingInterfaceOp::returningMatchAndRewrite(
     tensor::ExtractSliceOp sliceOp, PatternRewriter &rewriter) const {
-  auto sourceOp = sliceOp.source().getDefiningOp<TilingInterface>();
+  auto sourceOp = sliceOp.getSource().getDefiningOp<TilingInterface>();
   if (!sourceOp)
     return failure();
   SmallVector<Operation *> tiledOps = sourceOp.getTiledImplementation(

@@ -81,7 +81,7 @@ Value mlir::iree_compiler::IREE::LinalgExt::createMatchingSubsetInsertOp(
     OpBuilder &b, Location loc, tensor::ExtractSliceOp subsetExtractOp,
     Value source, Value dest) {
   return b.create<tensor::InsertSliceOp>(
-      loc, subsetExtractOp.source().getType(), source, dest,
+      loc, subsetExtractOp.getSource().getType(), source, dest,
       subsetExtractOp.offsets(), subsetExtractOp.sizes(),
       subsetExtractOp.strides(), subsetExtractOp.static_offsets(),
       subsetExtractOp.static_sizes(), subsetExtractOp.static_strides());

@@ -335,7 +335,7 @@ static linalg::LinalgOp findSingleLinalgOpDefiningAll(ValueRange range) {
     // mechanism of tracking op replacement at creation, or even different
     // patterns that identify the "main" result of a transformation.
     while (auto castOp = value.getDefiningOp<tensor::CastOp>())
-      value = castOp.source();
+      value = castOp.getSource();

     if (auto currentSourceOp = value.getDefiningOp<linalg::LinalgOp>()) {
       if (!sourceOp || sourceOp == currentSourceOp) {

4 changes: 0 additions & 4 deletions tests/e2e/tosa_ops/BUILD
@@ -50,7 +50,6 @@ LLVM_SRCS = enforce_glob(
         "pad.mlir",
         "reciprocal.mlir",
         "reduce.mlir",
-        "reluN.mlir",
         "reshape.mlir",
         "rsqrt.mlir",
         "select.mlir",
@@ -103,7 +102,6 @@ VMVX_SRCS = enforce_glob(
         "negate.mlir",
         "pad.mlir",
         "reciprocal.mlir",
-        "reluN.mlir",
         "reshape.mlir",
         "rsqrt.mlir",
         "select.mlir",
@@ -162,7 +160,6 @@ VMVX_MICROKERNELS_SRCS = enforce_glob(
         "pad.mlir",
         "reciprocal.mlir",
         "reduce.mlir",
-        "reluN.mlir",
         "reshape.mlir",
         "rsqrt.mlir",
         "select.mlir",
@@ -220,7 +217,6 @@ VULKAN_SRCS = enforce_glob(
         "pad.mlir",
         "reciprocal.mlir",
         "reduce.mlir",
-        "reluN.mlir",
         "reshape.mlir",
         "rsqrt.mlir",
         "select.mlir",

4 changes: 0 additions & 4 deletions tests/e2e/tosa_ops/CMakeLists.txt
@@ -44,7 +44,6 @@ iree_check_single_backend_test_suite(
     "pad.mlir"
     "reciprocal.mlir"
     "reduce.mlir"
-    "reluN.mlir"
     "reshape.mlir"
     "rsqrt.mlir"
     "select.mlir"
@@ -95,7 +94,6 @@ iree_check_single_backend_test_suite(
     "negate.mlir"
     "pad.mlir"
     "reciprocal.mlir"
-    "reluN.mlir"
     "reshape.mlir"
     "rsqrt.mlir"
     "select.mlir"
@@ -147,7 +145,6 @@ iree_check_single_backend_test_suite(
     "pad.mlir"
     "reciprocal.mlir"
     "reduce.mlir"
-    "reluN.mlir"
     "reshape.mlir"
     "rsqrt.mlir"
     "select.mlir"
@@ -200,7 +197,6 @@ iree_check_single_backend_test_suite(
     "pad.mlir"
     "reciprocal.mlir"
     "reduce.mlir"
-    "reluN.mlir"
     "reshape.mlir"
     "rsqrt.mlir"
     "select.mlir"

13 changes: 0 additions & 13 deletions tests/e2e/tosa_ops/reluN.mlir

This file was deleted.

2 changes: 1 addition & 1 deletion third_party/llvm-project
2 changes: 1 addition & 1 deletion third_party/mlir-hlo
@@ -478,7 +478,7 @@ struct FoldTensorCastOp : public OpInterfaceRewritePattern<TMTensorOp> {
     for (OpOperand *opOperand : op.getInputOperands()) {
       auto tensorCastOp = opOperand->get().getDefiningOp<tensor::CastOp>();
       newOperands.push_back(canFoldIntoConsumerOp(tensorCastOp)
-                                ? tensorCastOp.source()
+                                ? tensorCastOp.getSource()
                                 : opOperand->get());
     }
     // Init tensors may fold, in which case the resultType must also change.
