Skip to content

Commit

Permalink
Integrate LLVM at llvm/llvm-project@8396aeb07cdd
Browse files Browse the repository at this point in the history
Updates LLVM usage to match
[8396aeb07cdd](llvm/llvm-project@8396aeb07cdd)

PiperOrigin-RevId: 366034463
  • Loading branch information
iree-copybara-bot authored and iree-github-actions-bot committed Mar 31, 2021
1 parent 16670ba commit fda00cf
Show file tree
Hide file tree
Showing 10 changed files with 64 additions and 65 deletions.
2 changes: 1 addition & 1 deletion SUBMODULE_VERSIONS.txt
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@
b1fbd33c06cdb0024c67733c6fdec2009d17b384 third_party/googletest
88b845dee001723c4a0db1fe5477de735b6d3bb0 third_party/liburing
fad5434701aa52c920404c81532aa3ebf44bc3b7 third_party/llvm-bazel
afed50a14b34eb619624aed5c85f4f610f360650 third_party/llvm-project
8396aeb07cddd8ab9a6a154a4ab7ac56fc24bda5 third_party/llvm-project
dde739ffd00a6fa99175cf3c0f28e4b763dc6f5f third_party/mlir-emitc
e78c59d9277935f1d4d3b40d08e447be91be832a third_party/mlir-hlo
2b2bd45bbf9be04fd22ece5cc1f54679202e9257 third_party/pffft
Expand Down
2 changes: 1 addition & 1 deletion iree/compiler/Conversion/Common/LinalgBufferizePass.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -462,7 +462,7 @@ static LogicalResult convertTransferOp(OpBuilder &b,
b.create<vector::TransferWriteOp>(
loc, writeOp.vector(), newInputBuffer, writeOp.indices(),
writeOp.permutation_map(),
writeOp.masked() ? *writeOp.masked() : ArrayAttr());
writeOp.in_bounds() ? *writeOp.in_bounds() : ArrayAttr());
}
return success();
}
Expand Down
1 change: 0 additions & 1 deletion iree/compiler/Conversion/Common/Passes.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,6 @@ void addLinalgBufferizePasses(OpPassManager &passManager,
passManager.addNestedPass<FuncOp>(createCanonicalizerPass());
passManager.addNestedPass<FuncOp>(createCSEPass());
passManager.addNestedPass<FuncOp>(createBufferAllocViewCleanUpPass());
passManager.addPass(createCopyRemovalPass());
// passManager.addPass(createBufferHoistingPass());
// TODO(nicolasvasilache): bug in buffer loop hoisting with
// dynamic_linalg_matmul_on_tensors_fuse_0.mlir
Expand Down
26 changes: 13 additions & 13 deletions iree/compiler/Conversion/Common/test/linalg_bufferize.mlir
Original file line number Diff line number Diff line change
Expand Up @@ -278,25 +278,25 @@ hal.interface @legacy_io attributes {sym_visibility = "private"} {
// %4 = flow.dispatch.tensor.load %0, offsets = [%c0, %c0], sizes = [%c1, %c3], strides = [%c1, %c1] : !flow.dispatch.tensor<readonly:2x3xf32> -> tensor<2x3xf32>
// %5 = flow.dispatch.tensor.load %1, offsets = [%c0, %c0], sizes = [%c3, %c1], strides = [%c1, %c1] : !flow.dispatch.tensor<readonly:3x4xf32> -> tensor<3x1xf32>
// %6 = flow.dispatch.tensor.load %2, offsets = [%c0, %c0], sizes = [%c1, %c1], strides = [%c1, %c1] : !flow.dispatch.tensor<readonly:2x4xf32> -> tensor<2x1xf32>
// %7 = vector.transfer_read %4[%c0, %c0], %cst {masked = [false, false]} : tensor<2x3xf32>, vector<1x1xf32>
// %8 = vector.transfer_read %4[%c0, %c1], %cst {masked = [false, false]} : tensor<2x3xf32>, vector<1x1xf32>
// %9 = vector.transfer_read %4[%c0, %c2], %cst {masked = [false, false]} : tensor<2x3xf32>, vector<1x1xf32>
// %10 = vector.transfer_read %4[%c1, %c0], %cst {masked = [false, false]} : tensor<2x3xf32>, vector<1x1xf32>
// %11 = vector.transfer_read %4[%c1, %c1], %cst {masked = [false, false]} : tensor<2x3xf32>, vector<1x1xf32>
// %12 = vector.transfer_read %4[%c1, %c2], %cst {masked = [false, false]} : tensor<2x3xf32>, vector<1x1xf32>
// %13 = vector.transfer_read %5[%c0, %c0], %cst {masked = [false, false]} : tensor<3x1xf32>, vector<1x1xf32>
// %14 = vector.transfer_read %5[%c1, %c0], %cst {masked = [false, false]} : tensor<3x1xf32>, vector<1x1xf32>
// %15 = vector.transfer_read %5[%c2, %c0], %cst {masked = [false, false]} : tensor<3x1xf32>, vector<1x1xf32>
// %16 = vector.transfer_read %6[%c0, %c0], %cst {masked = [false, false]} : tensor<2x1xf32>, vector<1x1xf32>
// %17 = vector.transfer_read %6[%c1, %c0], %cst {masked = [false, false]} : tensor<2x1xf32>, vector<1x1xf32>
// %7 = vector.transfer_read %4[%c0, %c0], %cst {in_bounds = [true, true]} : tensor<2x3xf32>, vector<1x1xf32>
// %8 = vector.transfer_read %4[%c0, %c1], %cst {in_bounds = [true, true]} : tensor<2x3xf32>, vector<1x1xf32>
// %9 = vector.transfer_read %4[%c0, %c2], %cst {in_bounds = [true, true]} : tensor<2x3xf32>, vector<1x1xf32>
// %10 = vector.transfer_read %4[%c1, %c0], %cst {in_bounds = [true, true]} : tensor<2x3xf32>, vector<1x1xf32>
// %11 = vector.transfer_read %4[%c1, %c1], %cst {in_bounds = [true, true]} : tensor<2x3xf32>, vector<1x1xf32>
// %12 = vector.transfer_read %4[%c1, %c2], %cst {in_bounds = [true, true]} : tensor<2x3xf32>, vector<1x1xf32>
// %13 = vector.transfer_read %5[%c0, %c0], %cst {in_bounds = [true, true]} : tensor<3x1xf32>, vector<1x1xf32>
// %14 = vector.transfer_read %5[%c1, %c0], %cst {in_bounds = [true, true]} : tensor<3x1xf32>, vector<1x1xf32>
// %15 = vector.transfer_read %5[%c2, %c0], %cst {in_bounds = [true, true]} : tensor<3x1xf32>, vector<1x1xf32>
// %16 = vector.transfer_read %6[%c0, %c0], %cst {in_bounds = [true, true]} : tensor<2x1xf32>, vector<1x1xf32>
// %17 = vector.transfer_read %6[%c1, %c0], %cst {in_bounds = [true, true]} : tensor<2x1xf32>, vector<1x1xf32>
// %18 = vector.contract {indexing_maps = [#map0, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} %7, %13, %16 : vector<1x1xf32>, vector<1x1xf32> into vector<1x1xf32>
// %19 = vector.contract {indexing_maps = [#map0, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} %8, %14, %18 : vector<1x1xf32>, vector<1x1xf32> into vector<1x1xf32>
// %20 = vector.contract {indexing_maps = [#map0, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} %9, %15, %19 : vector<1x1xf32>, vector<1x1xf32> into vector<1x1xf32>
// %21 = vector.contract {indexing_maps = [#map0, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} %10, %13, %17 : vector<1x1xf32>, vector<1x1xf32> into vector<1x1xf32>
// %22 = vector.contract {indexing_maps = [#map0, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} %11, %14, %21 : vector<1x1xf32>, vector<1x1xf32> into vector<1x1xf32>
// %23 = vector.contract {indexing_maps = [#map0, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} %12, %15, %22 : vector<1x1xf32>, vector<1x1xf32> into vector<1x1xf32>
// %24 = vector.transfer_write %20, %6[%c0, %c0] {masked = [false, false]} : vector<1x1xf32>, tensor<2x1xf32>
// %25 = vector.transfer_write %23, %24[%c1, %c0] {masked = [false, false]} : vector<1x1xf32>, tensor<2x1xf32>
// %24 = vector.transfer_write %20, %6[%c0, %c0] {in_bounds = [true, true]} : vector<1x1xf32>, tensor<2x1xf32>
// %25 = vector.transfer_write %23, %24[%c1, %c0] {in_bounds = [true, true]} : vector<1x1xf32>, tensor<2x1xf32>
// flow.dispatch.tensor.store %25, %3, offsets = [%c0, %c0], sizes = [%c1, %c1], strides = [%c1, %c1] : tensor<2x1xf32> -> !flow.dispatch.tensor<writeonly:2x4xf32>
// return
// }
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -24,8 +24,8 @@
// CHECK: %[[D3:.*]] = vector.contract {{.*}} %[[V3]], %[[V6]], %[[VA]] : vector<1x1xf32>, vector<1x1xf32> into vector<1x1xf32>
// CHECK: %[[D4:.*]] = vector.contract {{.*}} %[[V4]], %[[V7]], %[[D3]] : vector<1x1xf32>, vector<1x1xf32> into vector<1x1xf32>
// CHECK: %[[D5:.*]] = vector.contract {{.*}} %[[V5]], %[[V8]], %[[D4]] : vector<1x1xf32>, vector<1x1xf32> into vector<1x1xf32>
// CHECK: %[[W0:.*]] = vector.transfer_write %[[D2]], %[[I2]][%[[C0]], %[[C0]]] {masked = [false, false]} : vector<1x1xf32>, tensor<2x1xf32>
// CHECK: %[[W1:.*]] = vector.transfer_write %[[D5]], %[[W0]][%[[C1]], %[[C0]]] {masked = [false, false]} : vector<1x1xf32>, tensor<2x1xf32>
// CHECK: %[[W0:.*]] = vector.transfer_write %[[D2]], %[[I2]][%[[C0]], %[[C0]]] {in_bounds = [true, true]} : vector<1x1xf32>, tensor<2x1xf32>
// CHECK: %[[W1:.*]] = vector.transfer_write %[[D5]], %[[W0]][%[[C1]], %[[C0]]] {in_bounds = [true, true]} : vector<1x1xf32>, tensor<2x1xf32>
// CHECK: flow.dispatch.tensor.store %[[W1]]

func @tensor_dispatch_0() {
Expand Down
8 changes: 4 additions & 4 deletions iree/compiler/Conversion/LinalgToSPIRV/ConvertToSPIRVPass.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -347,10 +347,10 @@ class TransferToCoopMatLoadStore final : public CoopMatOpLowering<OpTy> {
// TODO(thomasraoux): use column major operand when TransferRead +
// TransposeOp.
if (!op.permutation_map().isMinorIdentity()) return failure();
if (op.masked() &&
llvm::any_of(op.masked()->template cast<ArrayAttr>(),
[](mlir::Attribute maskedDim) {
return maskedDim.cast<BoolAttr>().getValue();
if (op.in_bounds() &&
llvm::any_of(op.in_bounds()->template cast<ArrayAttr>(),
[](mlir::Attribute dimInBounds) {
return !dimInBounds.cast<BoolAttr>().getValue();
}))
return failure();
auto matType = spirv::CooperativeMatrixNVType::get(
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -118,16 +118,16 @@ module attributes {gpu.container_module, spv.target_env = #spv.target_env<#spv.v
%c0_i32 = constant 0 : i32
%c0_i8 = constant 0 : i8
// CHECK: %[[C:.+]] = spv.CooperativeMatrixLoadNV %{{.*}}, %{{.*}}, %{{.*}}
%4 = vector.transfer_read %arg2[%c0, %c0], %c0_i32 {masked = [false, false]} : memref<4096x4096xi32>, vector<16x16xi32>
%4 = vector.transfer_read %arg2[%c0, %c0], %c0_i32 {in_bounds = [true, true]} : memref<4096x4096xi32>, vector<16x16xi32>
// CHECK: %[[ACC:.+]] = spv.Variable : !spv.ptr<!spv.coopmatrix<16x16xi32, Subgroup>, Function>
// CHECK: spv.mlir.loop {
// CHECK: spv.Branch ^[[BB:.+]](%{{.*}}, %[[C]] : i32, !spv.coopmatrix<16x16xi32, Subgroup>)
// CHECK: ^[[BB]](%{{.*}}: i32, %[[C1:.+]]: !spv.coopmatrix<16x16xi32, Subgroup>)
%5 = scf.for %arg3 = %c0 to %c4096 step %c32 iter_args(%arg4 = %4) -> (vector<16x16xi32>) {
// CHECK: %[[A:.+]] = spv.CooperativeMatrixLoadNV %{{.*}}, %{{.*}}, %{{.*}}
%6 = vector.transfer_read %arg0[%c0, %arg3], %c0_i8 {masked = [false, false]} : memref<4096x4096xi8>, vector<16x32xi8>
%6 = vector.transfer_read %arg0[%c0, %arg3], %c0_i8 {in_bounds = [true, true]} : memref<4096x4096xi8>, vector<16x32xi8>
// CHECK: %[[B:.+]] = spv.CooperativeMatrixLoadNV %{{.*}}, %{{.*}}, %{{.*}}
%7 = vector.transfer_read %arg1[%arg3, %c0], %c0_i8 {masked = [false, false]} : memref<4096x4096xi8>, vector<32x16xi8>
%7 = vector.transfer_read %arg1[%arg3, %c0], %c0_i8 {in_bounds = [true, true]} : memref<4096x4096xi8>, vector<32x16xi8>
// CHECK: %[[R:.+]] = spv.CooperativeMatrixMulAddNV %[[A]], %[[B]], %[[C1]]
%8 = vector.contract {indexing_maps = [#map1, #map2, #map3], iterator_types = ["parallel", "parallel", "reduction"]} %6, %7, %arg4 : vector<16x32xi8>, vector<32x16xi8> into vector<16x16xi32>
// CHECK: spv.Store "Function" %[[ACC]], %[[R]] : !spv.coopmatrix<16x16xi32, Subgroup>
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@ module {
memref<8x4xf32, affine_map<(d0, d1) -> (d0 * 64 + d1)>, 3>
%c0 = constant 0 : index
%cst_0 = constant dense<0.000000e+00> : vector<1x4xf32>
vector.transfer_write %cst_0, %1[%c0, %c0] {masked = [false, false]} :
vector.transfer_write %cst_0, %1[%c0, %c0] {in_bounds = [true, true]} :
vector<1x4xf32>, memref<8x4xf32, affine_map<(d0, d1) -> (d0 * 64 + d1)>, 3>
return
}
Expand Down
Loading

0 comments on commit fda00cf

Please sign in to comment.