Skip to content

Commit

Permalink
Integrate llvm-project @266a5a9cb9daa96c1eeaebc18e10f5a37d638734 (#17911)

Browse files Browse the repository at this point in the history

Bump llvm-project to
llvm/llvm-project@266a5a9

Update torch-mlir in IREE third_party (TODO: bump torch-mlir and update
to bumped submodule):
- Updated all uses of `linalg::MatmulUnsignedOp` to `linalg::MatmulOp`
with TypeFnAttr

---------

Signed-off-by: aviator19941 <[email protected]>
Signed-off-by: Max Dawkins <[email protected]>
Co-authored-by: Max Dawkins <[email protected]>
  • Loading branch information
aviator19941 and Max191 authored Jul 16, 2024
1 parent 3dffadb commit 30e2c20
Show file tree
Hide file tree
Showing 9 changed files with 26 additions and 25 deletions.
7 changes: 2 additions & 5 deletions compiler/plugins/target/LLVMCPU/LLVMTargetOptions.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -49,11 +49,8 @@ bool resolveCPUAndCPUFeatures(llvm::StringRef inCpu,
}
outCpu = triple.isX86() ? llvm::sys::getHostCPUName().str() : "";
llvm::SubtargetFeatures features;
llvm::StringMap<bool> hostFeatures;
if (llvm::sys::getHostCPUFeatures(hostFeatures)) {
for (auto &feature : hostFeatures) {
features.AddFeature(feature.first(), feature.second);
}
for (auto &feature : llvm::sys::getHostCPUFeatures()) {
features.AddFeature(feature.first(), feature.second);
}
outCpuFeatures = features.getString();
} else {
Expand Down
19 changes: 6 additions & 13 deletions compiler/src/iree/compiler/GlobalOptimization/OptimizeNumerics.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -239,22 +239,15 @@ struct LinalgFpMatmulToLowP : public OpRewritePattern<linalg::MatmulOp> {
castNumeric(rhsParams->producer, rhsLowPType, isSigned, rewriter);
Value newAccum =
castNumeric(accumParams->producer, accumLowPType, isSigned, rewriter);
Value newResult;

if (isSigned) {
newResult = rewriter
.create<linalg::MatmulOp>(loc, ValueRange{newLhs, newRhs},
ValueRange{newAccum})
.getResult(0);
} else {
newResult = rewriter
.create<linalg::MatmulUnsignedOp>(
loc, ValueRange{newLhs, newRhs}, ValueRange{newAccum})
.getResult(0);
auto newMatmulOp = rewriter.create<linalg::MatmulOp>(
loc, ValueRange{newLhs, newRhs}, ValueRange{newAccum});
if (!isSigned) {
newMatmulOp.setCast(linalg::TypeFn::cast_unsigned);
}

// Cast back.
newResult = castNumeric(newResult, origResultType, isSigned, rewriter);
Value newResult = castNumeric(newMatmulOp.getResult(0), origResultType,
isSigned, rewriter);
rewriter.replaceOp(matmulOp, ValueRange{newResult});

return success();
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@
#include "iree/compiler/GlobalOptimization/Passes.h"
#include "iree/compiler/GlobalOptimization/Utils.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Casting.h"
#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Linalg/IR/Linalg.h"
Expand Down Expand Up @@ -316,8 +317,11 @@ class NamedImplicitCastOpConversion : public OpInterfaceRewritePattern<OpTy> {
// Signed operations can only be folded with (implicitly) signed
// linalg named ops
if (llvm::isa<arith::ExtSIOp>(*castOp)) {
return !llvm::isa<linalg::MatmulUnsignedOp,
linalg::PoolingNhwcMaxUnsignedOp,
if (auto matmul =
llvm::dyn_cast<linalg::MatmulOp>(namedOp.getOperation())) {
return matmul.getCast() != linalg::TypeFn::cast_unsigned;
}
return !llvm::isa<linalg::PoolingNhwcMaxUnsignedOp,
linalg::PoolingNhwcMinUnsignedOp,
linalg::PoolingNwcMaxUnsignedOp,
linalg::PoolingNwcMinUnsignedOp>(namedOp);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@ util.func public @matmul_i8_i8_i32_unsigned(%arg0 : tensor<5x3xf32>, %arg1 : ten
%lhs = util.numeric.optional_narrow %arg0 : tensor<5x3xf32> as ui7 {max_value = 127 : ui7, min_value = 0 : ui7}
%rhs = util.numeric.optional_narrow %arg1 : tensor<3x1xf32> as ui7 {max_value = 127 : ui7, min_value = 0 : ui7}
%init = util.numeric.optional_narrow %arg2 : tensor<5x1xf32> as ui0
// CHECK: %[[RESULT:.*]] = linalg.matmul_unsigned ins(%[[LHS]], %[[RHS]] : tensor<5x3xi8>, tensor<3x1xi8>) outs(%[[INIT]] : tensor<5x1xi32>)
// CHECK: %[[RESULT:.*]] = linalg.matmul {cast = #linalg.type_fn<cast_unsigned>} ins(%[[LHS]], %[[RHS]] : tensor<5x3xi8>, tensor<3x1xi8>) outs(%[[INIT]] : tensor<5x1xi32>)
%2 = linalg.matmul ins(%lhs, %rhs : tensor<5x3xf32>, tensor<3x1xf32>) outs(%init : tensor<5x1xf32>) -> tensor<5x1xf32>
// CHECK: arith.uitofp %[[RESULT]] : tensor<5x1xi32> to tensor<5x1xf32>
util.return %2 : tensor<5x1xf32>
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -678,14 +678,14 @@ util.func public @unsigned_matmul_extsi(%arg0 : tensor<10x20xi32>,
%2 = tensor.empty() : tensor<10x40xi32>
%3 = arith.constant 0 : i32
%4 = linalg.fill ins(%3 : i32) outs(%2 : tensor<10x40xi32>) -> tensor<10x40xi32>
%5 = linalg.matmul_unsigned ins(%arg0, %1 : tensor<10x20xi32>, tensor<20x40xi32>)
%5 = linalg.matmul {cast = #linalg.type_fn<cast_unsigned>} ins(%arg0, %1 : tensor<10x20xi32>, tensor<20x40xi32>)
outs(%4 : tensor<10x40xi32>) -> tensor<10x40xi32>
util.return %5 : tensor<10x40xi32>
}
// CHECK-LABEL: util.func public @unsigned_matmul_extsi
// CHECK-SAME: %[[ARG0:.+]]: tensor<10x20xi32>
// CHECK: %[[GEN:.+]] = linalg.generic
// CHECK: %[[RESULT:.+]] = linalg.matmul_unsigned ins(%[[ARG0]], %[[GEN]]
// CHECK: %[[RESULT:.+]] = linalg.matmul {cast = #linalg.type_fn<cast_unsigned>} ins(%[[ARG0]], %[[GEN]]
// CHECK: util.return %[[RESULT]]

// -----
Expand Down
5 changes: 5 additions & 0 deletions tests/e2e/stablehlo_ops/BUILD.bazel
Original file line number Diff line number Diff line change
Expand Up @@ -88,6 +88,11 @@ iree_check_single_backend_test_suite(
],
driver = "local-task",
input_type = "stablehlo",
tags = [
# round_nearest_afz/fmodf fail with a wasm target, just disable all tests there for now
# undefined symbol: round_nearest_afz/fmodf
"nowasm",
],
target_backend = "llvm-cpu",
)

Expand Down
2 changes: 2 additions & 0 deletions tests/e2e/stablehlo_ops/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -83,6 +83,8 @@ iree_check_single_backend_test_suite(
"--iree-input-demote-f64-to-f32"
INPUT_TYPE
"stablehlo"
LABELS
"nowasm"
)

iree_check_single_backend_test_suite(
Expand Down
2 changes: 1 addition & 1 deletion third_party/llvm-project
Submodule llvm-project updated 4139 files
2 changes: 1 addition & 1 deletion third_party/torch-mlir

0 comments on commit 30e2c20

Please sign in to comment.