From 08583d547ac152554ec6b04883685fa771b23eb0 Mon Sep 17 00:00:00 2001 From: Stanley Winata <68087699+raikonenfnu@users.noreply.github.com> Date: Tue, 13 Aug 2024 10:22:27 -0700 Subject: [PATCH] Bump LLVM to llvm/llvm-project@6b7afaa9db8f (#18197) Signed-off-by: Stanley Winata --- .../Codegen/Common/GPU/GPUDistributionPatterns.cpp | 5 ++--- .../Utils/LLVMGPULayoutAnalysisAndDistribution.cpp | 11 ++++------- .../Codegen/SPIRV/SPIRVInitialVectorLowering.cpp | 6 +++--- .../Conversion/MeshToFlow/test/channel_creation.mlir | 2 +- .../Flow/Conversion/MeshToFlow/test/collectives.mlir | 2 +- third_party/llvm-project | 2 +- 6 files changed, 12 insertions(+), 16 deletions(-) diff --git a/compiler/src/iree/compiler/Codegen/Common/GPU/GPUDistributionPatterns.cpp b/compiler/src/iree/compiler/Codegen/Common/GPU/GPUDistributionPatterns.cpp index 17d65350efbe..ad0dbc2c2ee4 100644 --- a/compiler/src/iree/compiler/Codegen/Common/GPU/GPUDistributionPatterns.cpp +++ b/compiler/src/iree/compiler/Codegen/Common/GPU/GPUDistributionPatterns.cpp @@ -423,8 +423,7 @@ struct DistributeReductions final LogicalResult matchAndRewrite(vector::MultiDimReductionOp reductionOp, DistributionSignature &signature, PatternRewriter &rewriter) const override { - auto reductionDims = llvm::to_vector<4>( - reductionOp.getReductionDims().getAsRange<IntegerAttr>()); + ArrayRef<int64_t> reductionDims = reductionOp.getReductionDims(); // TODO: Add support for reductions along multiple dimensions. 
if (reductionDims.size() > 1) return failure(); @@ -461,7 +460,7 @@ struct DistributeReductions final Value storeVec = rewriter.create<arith::ConstantOp>( loc, storeVectorType, rewriter.getZeroAttr(storeVectorType)); - int reductionDim = reductionDims[0].getInt(); + int reductionDim = reductionDims[0]; int parallelDim = reductionDim ^ 1; if (!sourceLayout.getLane(reductionDim)) return failure(); diff --git a/compiler/src/iree/compiler/Codegen/LLVMGPU/Utils/LLVMGPULayoutAnalysisAndDistribution.cpp b/compiler/src/iree/compiler/Codegen/LLVMGPU/Utils/LLVMGPULayoutAnalysisAndDistribution.cpp index 9767b719140a..7123e4d9e999 100644 --- a/compiler/src/iree/compiler/Codegen/LLVMGPU/Utils/LLVMGPULayoutAnalysisAndDistribution.cpp +++ b/compiler/src/iree/compiler/Codegen/LLVMGPU/Utils/LLVMGPULayoutAnalysisAndDistribution.cpp @@ -297,8 +297,7 @@ static void propagateLayoutToReduceBroadcastTranspose( if (!layoutMap.count(reductionSrc)) return; // Get the reduction dims - auto reductionDims = - llvm::to_vector(reductionOp.getReductionDims().getAsRange<IntegerAttr>()); + ArrayRef<int64_t> reductionDims = reductionOp.getReductionDims(); // Get the transpose permutation ArrayRef<int64_t> perm = transposeOp.getPermutation(); // Don't support dim-1 broadcasted dims @@ -325,8 +324,7 @@ static void propagateLayoutToReduceBroadcastTranspose( return; // Check that transpose(reductionDim) == broadcastDim // and that the shapes match - for (IntegerAttr dimAttr : reductionDims) { - int64_t dim = dimAttr.getInt(); + for (int64_t dim : reductionDims) { int64_t transposedDim = perm[dim]; if (!broadcastedDims.contains(transposedDim)) return; @@ -816,13 +814,12 @@ static void distributeReductionBroadcastTranspose( return; Location loc = reductionOp.getLoc(); Layout layout = layoutMap.at(source); - auto reductionDims = - llvm::to_vector(reductionOp.getReductionDims().getAsRange<IntegerAttr>()); + ArrayRef<int64_t> reductionDims = reductionOp.getReductionDims(); vector::CombiningKind combiningKind = reductionOp.getKind(); // Only support reduction on one dimension if 
(reductionDims.size() > 1) return; - int reductionDim = reductionDims[0].getInt(); + int reductionDim = reductionDims[0]; std::array reductionOrder = layout.order[reductionDim]; std::array parallelOrder = layout.order[!reductionDim]; Value acc = reductionOp.getAcc(); diff --git a/compiler/src/iree/compiler/Codegen/SPIRV/SPIRVInitialVectorLowering.cpp b/compiler/src/iree/compiler/Codegen/SPIRV/SPIRVInitialVectorLowering.cpp index c39d4f894d69..500edacb99c2 100644 --- a/compiler/src/iree/compiler/Codegen/SPIRV/SPIRVInitialVectorLowering.cpp +++ b/compiler/src/iree/compiler/Codegen/SPIRV/SPIRVInitialVectorLowering.cpp @@ -202,9 +202,9 @@ SmallVector<int64_t> getNativeVectorShapeImpl(vector::MultiDimReductionOp op) { // Unroll all reduction dimensions by size 1 for vector.multi_reduction. VectorType srcVectorType = op.getSourceVectorType(); auto nativeSize = llvm::to_vector(srcVectorType.getShape()); - auto dims = op.getReductionDims().getAsValueRange<IntegerAttr>(); - for (const auto &dimAttr : dims) { - nativeSize[dimAttr.getZExtValue()] = 1; + ArrayRef<int64_t> dims = op.getReductionDims(); + for (const int64_t dim : dims) { + nativeSize[dim] = 1; } return nativeSize; } diff --git a/compiler/src/iree/compiler/Dialect/Flow/Conversion/MeshToFlow/test/channel_creation.mlir b/compiler/src/iree/compiler/Dialect/Flow/Conversion/MeshToFlow/test/channel_creation.mlir index 4894e06c2442..a0f52b299724 100644 --- a/compiler/src/iree/compiler/Dialect/Flow/Conversion/MeshToFlow/test/channel_creation.mlir +++ b/compiler/src/iree/compiler/Dialect/Flow/Conversion/MeshToFlow/test/channel_creation.mlir @@ -6,7 +6,7 @@ module @static_1d_mesh_grouping_along_axis_0 { // CHECK-NOT: util.global private @_mesh_mesh_1d_axes_0 mesh.mesh @mesh_1d(shape = 2) util.func public @f(%arg0: tensor<1xi8>) -> tensor<1xi8> { - %0 = mesh.all_reduce %arg0 on @mesh_1d mesh_axes = [0] reduction = <sum> : tensor<1xi8> -> tensor<1xi8> + %0 = mesh.all_reduce %arg0 on @mesh_1d mesh_axes = [0] reduction = sum : tensor<1xi8> -> tensor<1xi8> 
util.return %0 : tensor<1xi8> } } diff --git a/compiler/src/iree/compiler/Dialect/Flow/Conversion/MeshToFlow/test/collectives.mlir b/compiler/src/iree/compiler/Dialect/Flow/Conversion/MeshToFlow/test/collectives.mlir index 53471b23a0fe..f7aa51f1075e 100644 --- a/compiler/src/iree/compiler/Dialect/Flow/Conversion/MeshToFlow/test/collectives.mlir +++ b/compiler/src/iree/compiler/Dialect/Flow/Conversion/MeshToFlow/test/collectives.mlir @@ -50,7 +50,7 @@ util.func public @all_reduce_min_non_default_channel(%arg: tensor<1xi8>) -> tens // CHECK-DAG: %[[INITIAL_VAL:.+]] = tensor.empty() : tensor<1xi8> // CHECK: %[[RES:.+]] = flow.collective.all_reduce minimum, ui8, %[[INITIAL_VAL]], %[[ARG]], %[[CHANNEL]] // CHECK-SAME: (tensor<1xi8>, tensor<1xi8>, !flow.channel) -> %[[INITIAL_VAL]] as tensor<1xi8> - %0 = mesh.all_reduce %arg on @mesh_2d mesh_axes = [1, 0] reduction = <min> + %0 = mesh.all_reduce %arg on @mesh_2d mesh_axes = [1, 0] reduction = min : tensor<1xi8> -> tensor<1xi8> // CHECK: util.return %[[RES]] : tensor<1xi8> util.return %0 : tensor<1xi8> diff --git a/third_party/llvm-project b/third_party/llvm-project index 4369eee315d5..6b7afaa9db8f 160000 --- a/third_party/llvm-project +++ b/third_party/llvm-project @@ -1 @@ -Subproject commit 4369eee315d571f4f67f19d3fd05e42d921f26c9 +Subproject commit 6b7afaa9db8f904ebf0262774e38e54b36598782