From 30e2c203ca2c4368a6812eeec41e95b43c777c34 Mon Sep 17 00:00:00 2001
From: Avinash Sharma
Date: Tue, 16 Jul 2024 16:57:37 -0700
Subject: [PATCH] Integrate llvm-project @266a5a9cb9daa96c1eeaebc18e10f5a37d638734
 (#17911)

Bump llvm-project to
https://github.com/llvm/llvm-project/commit/266a5a9cb9daa96c1eeaebc18e10f5a37d638734

Update torch-mlir in IREE third_party (TODO: bump torch-mlir and update
to the bumped submodule):

- Updated all uses of `linalg::MatmulUnsignedOp` to `linalg::MatmulOp`
  with TypeFnAttr

---------

Signed-off-by: aviator19941
Signed-off-by: Max Dawkins
Co-authored-by: Max Dawkins
---
 .../target/LLVMCPU/LLVMTargetOptions.cpp    |  7 ++-----
 .../GlobalOptimization/OptimizeNumerics.cpp | 19 ++++++-------------
 .../GlobalOptimization/RaiseSpecialOps.cpp  |  8 ++++++--
 .../test/optimize_numerics.mlir             |  2 +-
 .../test/raise_special_ops.mlir             |  4 ++--
 tests/e2e/stablehlo_ops/BUILD.bazel         |  5 +++++
 tests/e2e/stablehlo_ops/CMakeLists.txt      |  2 ++
 third_party/llvm-project                    |  2 +-
 third_party/torch-mlir                      |  2 +-
 9 files changed, 26 insertions(+), 25 deletions(-)

diff --git a/compiler/plugins/target/LLVMCPU/LLVMTargetOptions.cpp b/compiler/plugins/target/LLVMCPU/LLVMTargetOptions.cpp
index b8c2513bf2ae..a9da1c9b3f26 100644
--- a/compiler/plugins/target/LLVMCPU/LLVMTargetOptions.cpp
+++ b/compiler/plugins/target/LLVMCPU/LLVMTargetOptions.cpp
@@ -49,11 +49,8 @@ bool resolveCPUAndCPUFeatures(llvm::StringRef inCpu,
     }
     outCpu = triple.isX86() ? llvm::sys::getHostCPUName().str() : "";
     llvm::SubtargetFeatures features;
-    llvm::StringMap<bool> hostFeatures;
-    if (llvm::sys::getHostCPUFeatures(hostFeatures)) {
-      for (auto &feature : hostFeatures) {
-        features.AddFeature(feature.first(), feature.second);
-      }
+    for (auto &feature : llvm::sys::getHostCPUFeatures()) {
+      features.AddFeature(feature.first(), feature.second);
     }
     outCpuFeatures = features.getString();
   } else {
diff --git a/compiler/src/iree/compiler/GlobalOptimization/OptimizeNumerics.cpp b/compiler/src/iree/compiler/GlobalOptimization/OptimizeNumerics.cpp
index e4758ff80497..329354e2e84b 100644
--- a/compiler/src/iree/compiler/GlobalOptimization/OptimizeNumerics.cpp
+++ b/compiler/src/iree/compiler/GlobalOptimization/OptimizeNumerics.cpp
@@ -239,22 +239,15 @@ struct LinalgFpMatmulToLowP : public OpRewritePattern<linalg::MatmulOp> {
         castNumeric(rhsParams->producer, rhsLowPType, isSigned, rewriter);
     Value newAccum =
         castNumeric(accumParams->producer, accumLowPType, isSigned, rewriter);
-    Value newResult;
-    if (isSigned) {
-      newResult = rewriter
-                      .create<linalg::MatmulOp>(loc, ValueRange{newLhs, newRhs},
-                                                ValueRange{newAccum})
-                      .getResult(0);
-    } else {
-      newResult = rewriter
-                      .create<linalg::MatmulUnsignedOp>(
-                          loc, ValueRange{newLhs, newRhs}, ValueRange{newAccum})
-                      .getResult(0);
+    auto newMatmulOp = rewriter.create<linalg::MatmulOp>(
+        loc, ValueRange{newLhs, newRhs}, ValueRange{newAccum});
+    if (!isSigned) {
+      newMatmulOp.setCast(linalg::TypeFn::cast_unsigned);
     }
-    // Cast back.
-    newResult = castNumeric(newResult, origResultType, isSigned, rewriter);
+    Value newResult = castNumeric(newMatmulOp.getResult(0), origResultType,
+                                  isSigned, rewriter);
     rewriter.replaceOp(matmulOp, ValueRange{newResult});
     return success();
diff --git a/compiler/src/iree/compiler/GlobalOptimization/RaiseSpecialOps.cpp b/compiler/src/iree/compiler/GlobalOptimization/RaiseSpecialOps.cpp
index 879ec00c6bba..8c267e204b41 100644
--- a/compiler/src/iree/compiler/GlobalOptimization/RaiseSpecialOps.cpp
+++ b/compiler/src/iree/compiler/GlobalOptimization/RaiseSpecialOps.cpp
@@ -13,6 +13,7 @@
 #include "iree/compiler/GlobalOptimization/Passes.h"
 #include "iree/compiler/GlobalOptimization/Utils.h"
 #include "llvm/ADT/STLExtras.h"
+#include "llvm/Support/Casting.h"
 #include "mlir/Dialect/Affine/IR/AffineOps.h"
 #include "mlir/Dialect/Arith/IR/Arith.h"
 #include "mlir/Dialect/Linalg/IR/Linalg.h"
@@ -316,8 +317,11 @@ class NamedImplicitCastOpConversion : public OpInterfaceRewritePattern<linalg::LinalgOp> {
     // Signed operations can only be folded with (implicitly) signed
     // linalg named ops
     if (llvm::isa<arith::ExtSIOp>(*castOp)) {
-      return !llvm::isa<linalg::MatmulUnsignedOp>(namedOp);
+      if (auto matmul =
+              llvm::dyn_cast<linalg::MatmulOp>(namedOp.getOperation())) {
+        return matmul.getCast() != linalg::TypeFn::cast_unsigned;
+      }
+      return !llvm::isa(namedOp);
     }
diff --git a/compiler/src/iree/compiler/GlobalOptimization/test/optimize_numerics.mlir b/compiler/src/iree/compiler/GlobalOptimization/test/optimize_numerics.mlir
index f31bb33ef73d..51bc2c3f1585 100644
--- a/compiler/src/iree/compiler/GlobalOptimization/test/optimize_numerics.mlir
+++ b/compiler/src/iree/compiler/GlobalOptimization/test/optimize_numerics.mlir
@@ -8,7 +8,7 @@ util.func public @matmul_i8_i8_i32_unsigned(%arg0 : tensor<5x3xf32>, %arg1 : ten
   %lhs = util.numeric.optional_narrow %arg0 : tensor<5x3xf32> as ui7 {max_value = 127 : ui7, min_value = 0 : ui7}
   %rhs = util.numeric.optional_narrow %arg1 : tensor<3x1xf32> as ui7 {max_value = 127 : ui7, min_value = 0 : ui7}
   %init = util.numeric.optional_narrow %arg2 : tensor<5x1xf32> as ui0
-  // CHECK: %[[RESULT:.*]] = linalg.matmul_unsigned ins(%[[LHS]], %[[RHS]] : tensor<5x3xi8>, tensor<3x1xi8>) outs(%[[INIT]] : tensor<5x1xi32>)
+  // CHECK: %[[RESULT:.*]] = linalg.matmul {cast = #linalg.type_fn<cast_unsigned>} ins(%[[LHS]], %[[RHS]] : tensor<5x3xi8>, tensor<3x1xi8>) outs(%[[INIT]] : tensor<5x1xi32>)
   %2 = linalg.matmul ins(%lhs, %rhs : tensor<5x3xf32>, tensor<3x1xf32>) outs(%init : tensor<5x1xf32>) -> tensor<5x1xf32>
   // CHECK: arith.uitofp %[[RESULT]] : tensor<5x1xi32> to tensor<5x1xf32>
   util.return %2 : tensor<5x1xf32>
diff --git a/compiler/src/iree/compiler/GlobalOptimization/test/raise_special_ops.mlir b/compiler/src/iree/compiler/GlobalOptimization/test/raise_special_ops.mlir
index 9dc25b7700ca..a1cd2d63216e 100644
--- a/compiler/src/iree/compiler/GlobalOptimization/test/raise_special_ops.mlir
+++ b/compiler/src/iree/compiler/GlobalOptimization/test/raise_special_ops.mlir
@@ -678,14 +678,14 @@ util.func public @unsigned_matmul_extsi(%arg0 : tensor<10x20xi32>,
   %2 = tensor.empty() : tensor<10x40xi32>
   %3 = arith.constant 0 : i32
   %4 = linalg.fill ins(%3 : i32) outs(%2 : tensor<10x40xi32>) -> tensor<10x40xi32>
-  %5 = linalg.matmul_unsigned ins(%arg0, %1 : tensor<10x20xi32>, tensor<20x40xi32>)
+  %5 = linalg.matmul {cast = #linalg.type_fn<cast_unsigned>} ins(%arg0, %1 : tensor<10x20xi32>, tensor<20x40xi32>)
       outs(%4 : tensor<10x40xi32>) -> tensor<10x40xi32>
   util.return %5 : tensor<10x40xi32>
 }
 // CHECK-LABEL: util.func public @unsigned_matmul_extsi
 // CHECK-SAME: %[[ARG0:.+]]: tensor<10x20xi32>
 // CHECK: %[[GEN:.+]] = linalg.generic
-// CHECK: %[[RESULT:.+]] = linalg.matmul_unsigned ins(%[[ARG0]], %[[GEN]]
+// CHECK: %[[RESULT:.+]] = linalg.matmul {cast = #linalg.type_fn<cast_unsigned>} ins(%[[ARG0]], %[[GEN]]
 // CHECK: util.return %[[RESULT]]

 // -----
diff --git a/tests/e2e/stablehlo_ops/BUILD.bazel b/tests/e2e/stablehlo_ops/BUILD.bazel
index 799ca1942652..4d8a31afcf59 100644
--- a/tests/e2e/stablehlo_ops/BUILD.bazel
+++ b/tests/e2e/stablehlo_ops/BUILD.bazel
@@ -88,6 +88,11 @@ iree_check_single_backend_test_suite(
     ],
     driver = "local-task",
     input_type = "stablehlo",
+    tags = [
+        # round_nearest_afz/fmodf fail with a wasm target, just disable all tests there for now
+        # undefined symbol: round_nearest_afz/fmodf
+        "nowasm",
+    ],
     target_backend = "llvm-cpu",
 )
diff --git a/tests/e2e/stablehlo_ops/CMakeLists.txt b/tests/e2e/stablehlo_ops/CMakeLists.txt
index fc8f20e7336b..eeae196ec4b4 100644
--- a/tests/e2e/stablehlo_ops/CMakeLists.txt
+++ b/tests/e2e/stablehlo_ops/CMakeLists.txt
@@ -83,6 +83,8 @@ iree_check_single_backend_test_suite(
     "--iree-input-demote-f64-to-f32"
   INPUT_TYPE
     "stablehlo"
+  LABELS
+    "nowasm"
 )

 iree_check_single_backend_test_suite(
diff --git a/third_party/llvm-project b/third_party/llvm-project
index c9f6518f742c..1f11b9fed233 160000
--- a/third_party/llvm-project
+++ b/third_party/llvm-project
@@ -1 +1 @@
-Subproject commit c9f6518f742c88bda309d5331e0a5d4664387f94
+Subproject commit 1f11b9fed2337ea24d137ff82fec75bddcd85b3c
diff --git a/third_party/torch-mlir b/third_party/torch-mlir
index 5e4f00acb13f..2f239acda848 160000
--- a/third_party/torch-mlir
+++ b/third_party/torch-mlir
@@ -1 +1 @@
-Subproject commit 5e4f00acb13f3f849a05e5ac28ee39307a5fdbff
+Subproject commit 2f239acda84814ff94bbe9061789ea151d5d25fd
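
Migration note for code built against the bumped LLVM: per the commit message bullet above, `linalg.matmul_unsigned` is replaced by `linalg.matmul` carrying a `cast` TypeFn attribute. The C++ sketch below condenses the two idioms this patch applies, on the builder side (OptimizeNumerics.cpp) and the matcher side (RaiseSpecialOps.cpp). The helper names and signatures are illustrative only, not code taken from IREE or LLVM; the APIs used (linalg::MatmulOp, setCast/getCast, linalg::TypeFn::cast_unsigned) are the ones the patch itself relies on.

// Minimal sketch, assuming the post-bump linalg API shown in this patch.
#include "mlir/Dialect/Linalg/IR/Linalg.h"
#include "mlir/IR/Builders.h"

using namespace mlir;

// Builder side: what used to be linalg.matmul_unsigned is now a plain
// linalg.matmul whose operand extension is switched to unsigned via the
// cast TypeFn attribute.
static Value buildUnsignedMatmul(OpBuilder &builder, Location loc, Value lhs,
                                 Value rhs, Value accum) {
  auto matmul = builder.create<linalg::MatmulOp>(loc, ValueRange{lhs, rhs},
                                                 ValueRange{accum});
  matmul.setCast(linalg::TypeFn::cast_unsigned);
  return matmul.getResult(0);
}

// Matcher side: instead of isa<linalg::MatmulUnsignedOp>, inspect the cast
// attribute on linalg.matmul.
static bool isUnsignedMatmul(linalg::MatmulOp matmul) {
  return matmul.getCast() == linalg::TypeFn::cast_unsigned;
}

In IR, as the updated lit tests above check, the same change reads as
`linalg.matmul {cast = #linalg.type_fn<cast_unsigned>} ins(...) outs(...)`
where the old form was `linalg.matmul_unsigned ins(...) outs(...)`.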