Merge google -> main #5276

Merged: 20 commits, Apr 1, 2021
Commits (20)
8455942
Merge main -> google
ScottTodd Mar 25, 2021
1006028
Integrate LLVM at llvm/llvm-project@4157a079afbf
iree-copybara-bot Mar 25, 2021
01e8cb5
Integrate LLVM at llvm/llvm-project@594e0ba96967
iree-copybara-bot Mar 26, 2021
3c3cb7c
Integrate LLVM at llvm/llvm-project@20d5c42e0ef5
iree-copybara-bot Mar 29, 2021
0a0db13
Integrate LLVM at llvm/llvm-project@482283042f79
iree-copybara-bot Mar 30, 2021
f2f173b
Integrate LLVM at llvm/llvm-project@c51e91e04681
iree-copybara-bot Mar 30, 2021
65945ba
Update benefit of numerically unstable Sigmoid legalization to zero
smit-hinsu Mar 30, 2021
20a2ba4
Integrate LLVM at llvm/llvm-project@77d81c2270c6
iree-copybara-bot Mar 30, 2021
2c9e502
Integrate LLVM at llvm/llvm-project@73adc05cedb2
iree-copybara-bot Mar 30, 2021
4fe87f3
Synchronize submodules with LLVM at llvm/llvm-project@73adc05cedb2
iree-github-actions-bot Mar 31, 2021
0a378bb
Synchronize submodules with LLVM at llvm/llvm-project@73adc05cedb2
iree-copybara-bot Mar 31, 2021
7a8867c
Integrate LLVM at llvm/llvm-project@c06a8f9caa51
iree-copybara-bot Mar 31, 2021
431ede6
Merge branch 'google' into main-to-google
KoolJBlack Mar 31, 2021
16670ba
Integrate LLVM at llvm/llvm-project@afed50a14b34
iree-copybara-bot Mar 31, 2021
fda00cf
Integrate LLVM at llvm/llvm-project@8396aeb07cdd
iree-copybara-bot Mar 31, 2021
54c8bf5
Merge pull request #5262 from KoolJBlack:main-to-google
iree-copybara-bot Mar 31, 2021
46aa337
Integrate LLVM at llvm/llvm-project@fcf680050686
iree-copybara-bot Mar 31, 2021
10ae8dc
Synchronize submodules with LLVM at llvm/llvm-project@fcf680050686
iree-github-actions-bot Mar 31, 2021
24774c5
Synchronize submodules with LLVM at llvm/llvm-project@fcf680050686
iree-copybara-bot Mar 31, 2021
1ba4e88
Merge branch 'main' into google-to-main
KoolJBlack Apr 1, 2021
8 changes: 4 additions & 4 deletions SUBMODULE_VERSIONS.txt
@@ -4,16 +4,16 @@
4fb0ff7069bd88ee85902f4d0bb62794e5f6d021 third_party/flatcc
b1fbd33c06cdb0024c67733c6fdec2009d17b384 third_party/googletest
88b845dee001723c4a0db1fe5477de735b6d3bb0 third_party/liburing
189e771009a640214e08e855830ae6f15a83c655 third_party/llvm-bazel
1f6a57c1a0fad922e04a2b1f414b092d4b0cd8b0 third_party/llvm-project
14a6c5dcc87f1f5967628f4bdf6de7ca61272a73 third_party/llvm-bazel
fcf6800506862586e2d409aaa03a1cff818edfcc third_party/llvm-project
dde739ffd00a6fa99175cf3c0f28e4b763dc6f5f third_party/mlir-emitc
cbef26c6a8f1e4be3f4cfb902db992c45e93b7a6 third_party/mlir-hlo
7b0a6bfeeedb7ac9fa0146c1fd1dc080f43bb8eb third_party/mlir-hlo
2b2bd45bbf9be04fd22ece5cc1f54679202e9257 third_party/pffft
d8c7ee00a687ac369e62e2032514a93a9b413502 third_party/pybind11
2887692065c38ef6617f423feafc6b69dd0a0681 third_party/ruy
685f86471e9d26b3eb7676695a2e2cefb4551ae9 third_party/spirv_cross
f8bf11a0253a32375c32cad92c841237b96696c0 third_party/spirv_headers
da3da1e8a81a9866d98bcfe54eb21ec27cab7000 third_party/tensorflow
75e42f8f26b75fe5e8af8461152fd1389e1c8229 third_party/tensorflow
e35d02186d00eff26ec6c698331a0767a28a51b0 third_party/tracy
9bd3f561bcee3f01d22912de10bb07ce4e23d378 third_party/vulkan_headers
3528e2aed3e8808f33e1e7d63eeb1560456a605a third_party/vulkan_memory_allocator
6 changes: 2 additions & 4 deletions experimental/ModelBuilder/ModelRunner.cpp
@@ -137,10 +137,8 @@ static void addVulkanLoweringPass(mlir::PassManager& manager) {
modulePM.addPass(mlir::spirv::createLowerABIAttributesPass());
modulePM.addPass(mlir::spirv::createUpdateVersionCapabilityExtensionPass());
manager.addPass(mlir::createConvertGpuLaunchFuncToVulkanLaunchFuncPass());
mlir::LowerToLLVMOptions llvmOptions = {
/*useBarePtrCallConv =*/false,
/*emitCWrappers = */ true,
/*indexBitwidth =*/mlir::kDeriveIndexBitwidthFromDataLayout};
mlir::LowerToLLVMOptions llvmOptions(manager.getContext());
llvmOptions.emitCWrappers = true;
manager.addPass(createLowerToLLVMPass(llvmOptions));
manager.addPass(mlir::createConvertVulkanLaunchFuncToVulkanCallsPass());
}
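This hunk and the two ModelBuilder tests below make the same fix for the LLVM integrate: LowerToLLVMOptions can no longer be brace-initialized and is instead constructed from the context and adjusted field by field. A minimal sketch of the new usage; the helper name and header paths are assumptions for this LLVM snapshot, not part of the PR:

// Sketch only; header paths are assumed for this LLVM snapshot.
#include "mlir/Conversion/StandardToLLVM/ConvertStandardToLLVM.h"
#include "mlir/Conversion/StandardToLLVM/ConvertStandardToLLVMPass.h"
#include "mlir/Pass/PassManager.h"

static void addStdToLLVMLowering(mlir::PassManager &pm) {
  // The options are now built from the context; the index bitwidth defaults
  // to the data layout unless explicitly overridden (see ConvertToNVVM.cpp
  // later in this PR for overrideIndexBitwidth(64)).
  mlir::LowerToLLVMOptions options(pm.getContext());
  options.emitCWrappers = true;
  pm.addPass(mlir::createLowerToLLVMPass(options));
}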
2 changes: 1 addition & 1 deletion experimental/ModelBuilder/VulkanWrapperPass.cpp
@@ -88,7 +88,7 @@ void AddVulkanLaunchWrapper::runOnOperation() {
}

LogicalResult AddVulkanLaunchWrapper::declareVulkanLaunchFunc(Location loc) {
OpBuilder builder(getOperation().getBody()->getTerminator());
auto builder = OpBuilder::atBlockEnd(getOperation().getBody());

SmallVector<Type, 8> vulkanLaunchTypes(3, builder.getIndexType());
vulkanLaunchTypes.insert(vulkanLaunchTypes.end(), args.begin(), args.end());
6 changes: 2 additions & 4 deletions experimental/ModelBuilder/test/BenchMatMulVectorGPU.cpp
@@ -102,10 +102,8 @@ static void addLoweringPasses(mlir::PassManager &pm,
mlir::spirv::createUpdateVersionCapabilityExtensionPass());

pm.addPass(mlir::createAddVulkanLaunchWrapperPass(numWorkgroups, args));
mlir::LowerToLLVMOptions llvmOptions = {
/*useBarePtrCallConv=*/false,
/*emitCWrappers=*/true,
/*indexBitwidth=*/mlir::kDeriveIndexBitwidthFromDataLayout};
mlir::LowerToLLVMOptions llvmOptions(pm.getContext());
llvmOptions.emitCWrappers = true;
pm.addPass(createLowerToLLVMPass(llvmOptions));
pm.addPass(mlir::createConvertVulkanLaunchFuncToVulkanCallsPass());
}
6 changes: 2 additions & 4 deletions experimental/ModelBuilder/test/TestVectorToGPU.cpp
@@ -76,10 +76,8 @@ static void addLoweringPasses(mlir::PassManager &pm,
mlir::spirv::createUpdateVersionCapabilityExtensionPass());

pm.addPass(mlir::createAddVulkanLaunchWrapperPass(workgroupSize, args));
mlir::LowerToLLVMOptions llvmOptions = {
/*useBarePtrCallConv=*/false,
/*emitCWrappers=*/true,
/*indexBitwidth=*/mlir::kDeriveIndexBitwidthFromDataLayout};
mlir::LowerToLLVMOptions llvmOptions(pm.getContext());
llvmOptions.emitCWrappers = true;
pm.addPass(createLowerToLLVMPass(llvmOptions));
pm.addPass(mlir::createConvertVulkanLaunchFuncToVulkanCallsPass());
}
@@ -95,6 +95,11 @@ class ConvertToMHLOPass : public PassWrapper<ConvertToMHLOPass, FunctionPass> {
target.addLegalOp<mlir::CallOp>();
target.addLegalOp<mlir::tensor::CastOp>();

// TODO(suderman): Enable logistic op for lowering once the op is
// supported in IREE. Also, remove the numerically unstable ConvertSigmoidOp
// pattern in the legalize-tf pass.
target.addIllegalOp<mhlo::LogisticOp>();

DenseSet<Operation *> prevUnconvertedOps;
DenseSet<Operation *> unconvertedOps;

@@ -52,3 +52,35 @@ func @f(%arg0: tensor<f32>, %arg1: tensor<f32>) -> (tensor<3xf32>) {
// CHECK: return [[VAL8]]
return %29 : tensor<3xf32>
}

// CHECK-LABEL: @sigmoid
func @sigmoid(%arg0: tensor<2xf32>) -> tensor<2xf32> {
// CHECK-DAG: [[HALF:%.+]] = mhlo.constant dense<5.000000e-01> : tensor<2xf32>
// CHECK-DAG: [[R1:%.+]] = mhlo.multiply %arg0, [[HALF]] : tensor<2xf32>
// CHECK-DAG: [[R2:%.+]] = "mhlo.tanh"([[R1]]) : (tensor<2xf32>) -> tensor<2xf32>
// CHECK-DAG: [[R3:%.+]] = mhlo.multiply [[R2]], [[HALF]] : tensor<2xf32>
// CHECK-DAG: [[R4:%.+]] = mhlo.add [[R3]], [[HALF]] : tensor<2xf32>
%0 = "tf.Sigmoid"(%arg0) : (tensor<2xf32>) -> tensor<2xf32>
return %0 : tensor<2xf32>
}

// CHECK-LABEL: @sigmoid_complex
func @sigmoid_complex(%arg0: tensor<2xcomplex<f32>>) -> tensor<2xcomplex<f32>> {
// CHECK: [[R0:%.+]] = mhlo.constant dense<(5.000000e-01,0.000000e+00)> : tensor<complex<f32>>
// CHECK-NOT: tf.Sigmoid
%0 = "tf.Sigmoid"(%arg0) : (tensor<2xcomplex<f32>>) -> tensor<2xcomplex<f32>>
return %0 : tensor<2xcomplex<f32>>
}

// CHECK-LABEL: @sigmoid_unranked
func @sigmoid_unranked(%arg0: tensor<*xf32>) -> tensor<*xf32> {
// CHECK-DAG: [[SCALAR:%.+]] = mhlo.constant dense<5.000000e-01> : tensor<f32>
// CHECK-DAG: [[SHAPE_VAL:%.+]] = shape.shape_of %arg0 : tensor<*xf32> -> tensor<?xindex>
// CHECK-DAG: [[HALF:%.+]] = "mhlo.dynamic_broadcast_in_dim"([[SCALAR]], [[SHAPE_VAL]]) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>, tensor<?xindex>) -> tensor<*xf32>
// CHECK-DAG: [[R1:%.+]] = mhlo.multiply %arg0, [[HALF]] : tensor<*xf32>
// CHECK-DAG: [[R2:%.+]] = "mhlo.tanh"([[R1]]) : (tensor<*xf32>) -> tensor<*xf32>
// CHECK-DAG: [[R3:%.+]] = mhlo.multiply [[R2]], [[HALF]] : tensor<*xf32>
// CHECK-DAG: [[R4:%.+]] = mhlo.add [[R3]], [[HALF]] : tensor<*xf32>
%0 = "tf.Sigmoid"(%arg0) : (tensor<*xf32>) -> tensor<*xf32>
return %0 : tensor<*xf32>
}
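The expansion these CHECK lines verify is the tanh form of the logistic function; as a reference identity (a note added here, not part of the diff):

\sigma(x) = \frac{1}{1 + e^{-x}} = \frac{1}{2}\tanh\!\left(\frac{x}{2}\right) + \frac{1}{2}

which is exactly the multiply-by-0.5, tanh, multiply, add sequence checked above. Per the TODO in ConvertToMHLOPass, mhlo.logistic is marked illegal so legalization keeps falling back to this pattern; the intent is to drop it once the op is supported in IREE.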
@@ -89,8 +89,8 @@ struct FoldReshapeIntoInterfaceTensorLoad

// Removes operations with Allocate MemoryEffects but no uses.
struct RemoveDeadMemAllocs : RewritePattern {
RemoveDeadMemAllocs(PatternBenefit benefit = 1)
: RewritePattern(benefit, MatchAnyOpTypeTag()) {}
RemoveDeadMemAllocs(MLIRContext *context, PatternBenefit benefit = 1)
: RewritePattern(MatchAnyOpTypeTag(), benefit, context) {}

LogicalResult matchAndRewrite(Operation *op,
PatternRewriter &rewriter) const override {
@@ -109,8 +109,8 @@ struct BufferAllocViewCleanUpPass
: public PassWrapper<BufferAllocViewCleanUpPass, FunctionPass> {
void runOnFunction() override {
OwningRewritePatternList patterns(&getContext());
patterns.insert<FoldReshapeIntoInterfaceTensorLoad>(&getContext());
patterns.insert<RemoveDeadMemAllocs>();
patterns.insert<FoldReshapeIntoInterfaceTensorLoad, RemoveDeadMemAllocs>(
&getContext());
(void)applyPatternsAndFoldGreedily(getOperation(), std::move(patterns));
}
};
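This hunk is one instance of a constructor change that recurs below (GenericTypeConvert in DemoteF32ToF16.cpp, TileWorkgroups in the tiling pass, ElementwiseLowering in VectorToGPUPass.cpp): root-agnostic RewritePattern/ConversionPattern constructors now take the MLIRContext explicitly, with the tag first. A minimal sketch of the new shape, with an illustrative pattern name and an empty body that are not part of the PR:

#include "mlir/IR/PatternMatch.h"

// Hypothetical pattern: matches any op and does nothing; shows the new ctor
// argument order (tag, benefit, context) instead of the old (benefit, tag).
struct NoOpAnyPattern : public mlir::RewritePattern {
  NoOpAnyPattern(mlir::MLIRContext *context, mlir::PatternBenefit benefit = 1)
      : RewritePattern(MatchAnyOpTypeTag(), benefit, context) {}

  mlir::LogicalResult matchAndRewrite(mlir::Operation *op,
                                      mlir::PatternRewriter &rewriter) const override {
    return mlir::failure();  // placeholder body
  }
};

Registration then forwards the context once for every listed pattern, as in patterns.insert<FoldReshapeIntoInterfaceTensorLoad, RemoveDeadMemAllocs>(&getContext()) above.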
12 changes: 7 additions & 5 deletions iree/compiler/Conversion/Common/LinalgBufferizePass.cpp
@@ -329,12 +329,14 @@ static LogicalResult convertTensorReshapeOp(
resultTensorType, {}, inputBufferType.getMemorySpaceAsInt());
Value bufferReshape = b.create<linalg::ReshapeOp>(
loc, reshapeResultType, reshapeSrc, op.reassociation());
auto allocationDynamicSizes = linalg::getReshapeOutputShapeFromInputShape(
b, loc, inputBuffer, resultTensorType.getShape(),
op.getReassociationMaps());
SmallVector<SmallVector<Value>> reshapeResultShape;
if (failed(op.reifyReturnTypeShapesPerResultDim(b, reshapeResultShape)) ||
reshapeResultShape.size() != 1) {
return op.emitError("failed to get shape of result");
}
return createAliasingBufferOrAllocationForResult(
b, loc, allocationFn, srcTensor, bufferReshape, resultTensor,
allocationDynamicSizes, bvm);
reshapeResultShape[0], bvm);
}

static SmallVector<int64_t, 4> extractFromI64ArrayAttr(ArrayAttr attr) {
@@ -460,7 +462,7 @@ static LogicalResult convertTransferOp(OpBuilder &b,
b.create<vector::TransferWriteOp>(
loc, writeOp.vector(), newInputBuffer, writeOp.indices(),
writeOp.permutation_map(),
writeOp.masked() ? *writeOp.masked() : ArrayAttr());
writeOp.in_bounds() ? *writeOp.in_bounds() : ArrayAttr());
}
return success();
}
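The other rename threaded through this PR is on vector transfer ops: the masked attribute (changing just above) becomes in_bounds with inverted polarity, so a dimension that was masked = false is now in_bounds = true. That is why the bufferize and vectorization tests below swap {masked = [false, false]} for {in_bounds = [true, true]}, and why the cooperative-matrix check in ConvertToSPIRVPass.cpp negates the boolean. A small sketch of the flipped check, with an assumed helper name and headers:

#include "llvm/ADT/STLExtras.h"
#include "mlir/IR/BuiltinAttributes.h"

// Sketch: "may this transfer touch an out-of-bounds element in some dimension?"
// Under the old attribute this was a check for masked == true; under the new
// one it is a check for in_bounds == false (an absent attribute gives no
// in-bounds guarantee).
template <typename TransferOpTy>
static bool hasPossiblyOutOfBoundsDim(TransferOpTy op) {
  if (!op.in_bounds()) return true;
  return llvm::any_of(
      op.in_bounds()->template cast<mlir::ArrayAttr>(),
      [](mlir::Attribute dim) { return !dim.cast<mlir::BoolAttr>().getValue(); });
}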
1 change: 0 additions & 1 deletion iree/compiler/Conversion/Common/Passes.cpp
@@ -25,7 +25,6 @@ void addLinalgBufferizePasses(OpPassManager &passManager,
passManager.addNestedPass<FuncOp>(createCanonicalizerPass());
passManager.addNestedPass<FuncOp>(createCSEPass());
passManager.addNestedPass<FuncOp>(createBufferAllocViewCleanUpPass());
passManager.addPass(createCopyRemovalPass());
// passManager.addPass(createBufferHoistingPass());
// TODO(nicolasvasilache): bug in buffer loop hoisting with
// dynamic_linalg_matmul_on_tensors_fuse_0.mlir
26 changes: 13 additions & 13 deletions iree/compiler/Conversion/Common/test/linalg_bufferize.mlir
@@ -278,25 +278,25 @@ hal.interface @legacy_io attributes {sym_visibility = "private"} {
// %4 = flow.dispatch.tensor.load %0, offsets = [%c0, %c0], sizes = [%c1, %c3], strides = [%c1, %c1] : !flow.dispatch.tensor<readonly:2x3xf32> -> tensor<2x3xf32>
// %5 = flow.dispatch.tensor.load %1, offsets = [%c0, %c0], sizes = [%c3, %c1], strides = [%c1, %c1] : !flow.dispatch.tensor<readonly:3x4xf32> -> tensor<3x1xf32>
// %6 = flow.dispatch.tensor.load %2, offsets = [%c0, %c0], sizes = [%c1, %c1], strides = [%c1, %c1] : !flow.dispatch.tensor<readonly:2x4xf32> -> tensor<2x1xf32>
// %7 = vector.transfer_read %4[%c0, %c0], %cst {masked = [false, false]} : tensor<2x3xf32>, vector<1x1xf32>
// %8 = vector.transfer_read %4[%c0, %c1], %cst {masked = [false, false]} : tensor<2x3xf32>, vector<1x1xf32>
// %9 = vector.transfer_read %4[%c0, %c2], %cst {masked = [false, false]} : tensor<2x3xf32>, vector<1x1xf32>
// %10 = vector.transfer_read %4[%c1, %c0], %cst {masked = [false, false]} : tensor<2x3xf32>, vector<1x1xf32>
// %11 = vector.transfer_read %4[%c1, %c1], %cst {masked = [false, false]} : tensor<2x3xf32>, vector<1x1xf32>
// %12 = vector.transfer_read %4[%c1, %c2], %cst {masked = [false, false]} : tensor<2x3xf32>, vector<1x1xf32>
// %13 = vector.transfer_read %5[%c0, %c0], %cst {masked = [false, false]} : tensor<3x1xf32>, vector<1x1xf32>
// %14 = vector.transfer_read %5[%c1, %c0], %cst {masked = [false, false]} : tensor<3x1xf32>, vector<1x1xf32>
// %15 = vector.transfer_read %5[%c2, %c0], %cst {masked = [false, false]} : tensor<3x1xf32>, vector<1x1xf32>
// %16 = vector.transfer_read %6[%c0, %c0], %cst {masked = [false, false]} : tensor<2x1xf32>, vector<1x1xf32>
// %17 = vector.transfer_read %6[%c1, %c0], %cst {masked = [false, false]} : tensor<2x1xf32>, vector<1x1xf32>
// %7 = vector.transfer_read %4[%c0, %c0], %cst {in_bounds = [true, true]} : tensor<2x3xf32>, vector<1x1xf32>
// %8 = vector.transfer_read %4[%c0, %c1], %cst {in_bounds = [true, true]} : tensor<2x3xf32>, vector<1x1xf32>
// %9 = vector.transfer_read %4[%c0, %c2], %cst {in_bounds = [true, true]} : tensor<2x3xf32>, vector<1x1xf32>
// %10 = vector.transfer_read %4[%c1, %c0], %cst {in_bounds = [true, true]} : tensor<2x3xf32>, vector<1x1xf32>
// %11 = vector.transfer_read %4[%c1, %c1], %cst {in_bounds = [true, true]} : tensor<2x3xf32>, vector<1x1xf32>
// %12 = vector.transfer_read %4[%c1, %c2], %cst {in_bounds = [true, true]} : tensor<2x3xf32>, vector<1x1xf32>
// %13 = vector.transfer_read %5[%c0, %c0], %cst {in_bounds = [true, true]} : tensor<3x1xf32>, vector<1x1xf32>
// %14 = vector.transfer_read %5[%c1, %c0], %cst {in_bounds = [true, true]} : tensor<3x1xf32>, vector<1x1xf32>
// %15 = vector.transfer_read %5[%c2, %c0], %cst {in_bounds = [true, true]} : tensor<3x1xf32>, vector<1x1xf32>
// %16 = vector.transfer_read %6[%c0, %c0], %cst {in_bounds = [true, true]} : tensor<2x1xf32>, vector<1x1xf32>
// %17 = vector.transfer_read %6[%c1, %c0], %cst {in_bounds = [true, true]} : tensor<2x1xf32>, vector<1x1xf32>
// %18 = vector.contract {indexing_maps = [#map0, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} %7, %13, %16 : vector<1x1xf32>, vector<1x1xf32> into vector<1x1xf32>
// %19 = vector.contract {indexing_maps = [#map0, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} %8, %14, %18 : vector<1x1xf32>, vector<1x1xf32> into vector<1x1xf32>
// %20 = vector.contract {indexing_maps = [#map0, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} %9, %15, %19 : vector<1x1xf32>, vector<1x1xf32> into vector<1x1xf32>
// %21 = vector.contract {indexing_maps = [#map0, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} %10, %13, %17 : vector<1x1xf32>, vector<1x1xf32> into vector<1x1xf32>
// %22 = vector.contract {indexing_maps = [#map0, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} %11, %14, %21 : vector<1x1xf32>, vector<1x1xf32> into vector<1x1xf32>
// %23 = vector.contract {indexing_maps = [#map0, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} %12, %15, %22 : vector<1x1xf32>, vector<1x1xf32> into vector<1x1xf32>
// %24 = vector.transfer_write %20, %6[%c0, %c0] {masked = [false, false]} : vector<1x1xf32>, tensor<2x1xf32>
// %25 = vector.transfer_write %23, %24[%c1, %c0] {masked = [false, false]} : vector<1x1xf32>, tensor<2x1xf32>
// %24 = vector.transfer_write %20, %6[%c0, %c0] {in_bounds = [true, true]} : vector<1x1xf32>, tensor<2x1xf32>
// %25 = vector.transfer_write %23, %24[%c1, %c0] {in_bounds = [true, true]} : vector<1x1xf32>, tensor<2x1xf32>
// flow.dispatch.tensor.store %25, %3, offsets = [%c0, %c0], sizes = [%c1, %c1], strides = [%c1, %c1] : tensor<2x1xf32> -> !flow.dispatch.tensor<writeonly:2x4xf32>
// return
// }
2 changes: 1 addition & 1 deletion iree/compiler/Conversion/HLOToHLO/DemoteF32ToF16.cpp
@@ -110,7 +110,7 @@ class FloatTypeConverter : public TypeConverter {
class GenericTypeConvert : public ConversionPattern {
public:
GenericTypeConvert(MLIRContext *context, TypeConverter &converter)
: ConversionPattern(0, converter, MatchAnyOpTypeTag()) {}
: ConversionPattern(converter, MatchAnyOpTypeTag(), 0, context) {}
LogicalResult matchAndRewrite(
Operation *op, ArrayRef<Value> operands,
ConversionPatternRewriter &rewriter) const override {
2 changes: 1 addition & 1 deletion iree/compiler/Conversion/LinalgToLLVM/ConvertToLLVM.cpp
@@ -700,7 +700,7 @@ void ConvertToLLVMPass::runOnOperation() {
LLVMConversionTarget target(getContext());
// IREE::HAL::InterfaceOp will be removed after successful conversion of the
// rest of the IR.
target.addLegalOp<ModuleOp, ModuleTerminatorOp, IREE::HAL::InterfaceOp,
target.addLegalOp<ModuleOp, IREE::HAL::InterfaceOp,
IREE::HAL::InterfaceBindingOp, IREE::HAL::InterfaceEndOp>();
target.addIllegalDialect<ShapeDialect, StandardOpsDialect, IREEDialect,
IREE::HAL::HALDialect, math::MathDialect>();
@@ -44,9 +44,9 @@ namespace {
// that is always templated on an op.
struct TileWorkgroups : public linalg::LinalgBaseTilingPattern {
using Base = linalg::LinalgBaseTilingPattern;
TileWorkgroups(linalg::LinalgTilingOptions options,
TileWorkgroups(MLIRContext *context, linalg::LinalgTilingOptions options,
linalg::LinalgTransformationFilter marker)
: LinalgBaseTilingPattern(options, marker) {}
: LinalgBaseTilingPattern(context, options, marker) {}
LogicalResult matchAndRewrite(Operation *op,
PatternRewriter &rewriter) const override {
auto contractionOp = dyn_cast<linalg::ContractionOpInterface>(op);
@@ -153,6 +153,7 @@ void TileAndVectorizeWorkgroups::runOnFunction() {
// First level of tiling patterns. (workgroups memory)
OwningRewritePatternList l1patterns(&getContext());
l1patterns.insert<TileWorkgroups>(
context,
linalg::LinalgTilingOptions().setTileSizeComputationFunction(
[](OpBuilder &builder,
Operation *operation) -> SmallVector<Value, 4> {
@@ -175,6 +176,7 @@
{
OwningRewritePatternList l2patterns(&getContext());
l2patterns.insert<TileWorkgroups>(
context,
linalg::LinalgTilingOptions().setTileSizeComputationFunction(
[](OpBuilder &builder,
Operation *operation) -> SmallVector<Value, 4> {
@@ -24,8 +24,8 @@
// CHECK: %[[D3:.*]] = vector.contract {{.*}} %[[V3]], %[[V6]], %[[VA]] : vector<1x1xf32>, vector<1x1xf32> into vector<1x1xf32>
// CHECK: %[[D4:.*]] = vector.contract {{.*}} %[[V4]], %[[V7]], %[[D3]] : vector<1x1xf32>, vector<1x1xf32> into vector<1x1xf32>
// CHECK: %[[D5:.*]] = vector.contract {{.*}} %[[V5]], %[[V8]], %[[D4]] : vector<1x1xf32>, vector<1x1xf32> into vector<1x1xf32>
// CHECK: %[[W0:.*]] = vector.transfer_write %[[D2]], %[[I2]][%[[C0]], %[[C0]]] {masked = [false, false]} : vector<1x1xf32>, tensor<2x1xf32>
// CHECK: %[[W1:.*]] = vector.transfer_write %[[D5]], %[[W0]][%[[C1]], %[[C0]]] {masked = [false, false]} : vector<1x1xf32>, tensor<2x1xf32>
// CHECK: %[[W0:.*]] = vector.transfer_write %[[D2]], %[[I2]][%[[C0]], %[[C0]]] {in_bounds = [true, true]} : vector<1x1xf32>, tensor<2x1xf32>
// CHECK: %[[W1:.*]] = vector.transfer_write %[[D5]], %[[W0]][%[[C1]], %[[C0]]] {in_bounds = [true, true]} : vector<1x1xf32>, tensor<2x1xf32>
// CHECK: flow.dispatch.tensor.store %[[W1]]

func @tensor_dispatch_0() {
6 changes: 2 additions & 4 deletions iree/compiler/Conversion/LinalgToNVVM/ConvertToNVVM.cpp
@@ -175,10 +175,8 @@ struct ConvertToNVVMPass
ModuleOp m = getOperation();

/// Customize the bitwidth used for the device side index computations.
LowerToLLVMOptions options = {/*useBarePtrCallConv =*/false,
/*emitCWrappers =*/false,
/*indexBitwidth =*/64,
/*useAlignedAlloc =*/false};
LowerToLLVMOptions options(m.getContext(), DataLayout(m));
options.overrideIndexBitwidth(64);
LLVMTypeConverter converter(m.getContext(), options);
// Apply in-dialect lowering first. In-dialect lowering will replace ops
// which need to be lowered further, which is not supported by a single
8 changes: 4 additions & 4 deletions iree/compiler/Conversion/LinalgToSPIRV/ConvertToSPIRVPass.cpp
@@ -347,10 +347,10 @@ class TransferToCoopMatLoadStore final : public CoopMatOpLowering<OpTy> {
// TODO(thomasraoux): use column major operand when TransferRead +
// TransposeOp.
if (!op.permutation_map().isMinorIdentity()) return failure();
if (op.masked() &&
llvm::any_of(op.masked()->template cast<ArrayAttr>(),
[](mlir::Attribute maskedDim) {
return maskedDim.cast<BoolAttr>().getValue();
if (op.in_bounds() &&
llvm::any_of(op.in_bounds()->template cast<ArrayAttr>(),
[](mlir::Attribute dimInBounds) {
return !dimInBounds.cast<BoolAttr>().getValue();
}))
return failure();
auto matType = spirv::CooperativeMatrixNVType::get(
2 changes: 1 addition & 1 deletion iree/compiler/Conversion/LinalgToSPIRV/VectorToGPUPass.cpp
@@ -304,7 +304,7 @@ class VectorContractLowering : public OpRewritePattern<vector::ContractionOp> {
class ElementwiseLowering : public RewritePattern {
public:
ElementwiseLowering(MLIRContext *context)
: RewritePattern(0, MatchAnyOpTypeTag()) {}
: RewritePattern(MatchAnyOpTypeTag(), 0, context) {}

LogicalResult matchAndRewrite(Operation *op,
PatternRewriter &rewriter) const override {